diff --git a/builder/builder-next/adapters/containerimage/pull.go b/builder/builder-next/adapters/containerimage/pull.go
index d93e3e27568b9..2e63cab82608b 100644
--- a/builder/builder-next/adapters/containerimage/pull.go
+++ b/builder/builder-next/adapters/containerimage/pull.go
@@ -18,7 +18,7 @@ import (
 	ctdreference "github.com/containerd/containerd/reference"
 	"github.com/containerd/containerd/remotes"
 	"github.com/containerd/containerd/remotes/docker"
-	"github.com/containerd/containerd/remotes/docker/schema1"
+	"github.com/containerd/containerd/remotes/docker/schema1" //nolint:staticcheck // Ignore SA1019: "github.com/containerd/containerd/remotes/docker/schema1" is deprecated: use images formatted in Docker Image Manifest v2, Schema 2, or OCI Image Spec v1.
 	distreference "github.com/docker/distribution/reference"
 	dimages "github.com/docker/docker/daemon/images"
 	"github.com/docker/docker/distribution/metadata"
diff --git a/daemon/runtime_unix_test.go b/daemon/runtime_unix_test.go
index 570d3536505c3..58090aeee9878 100644
--- a/daemon/runtime_unix_test.go
+++ b/daemon/runtime_unix_test.go
@@ -10,6 +10,8 @@ import (
 	"github.com/containerd/containerd/plugin"
 	v2runcoptions "github.com/containerd/containerd/runtime/v2/runc/options"
+	"github.com/google/go-cmp/cmp/cmpopts"
+	"google.golang.org/protobuf/proto"
 	"gotest.tools/v3/assert"
 	is "gotest.tools/v3/assert/cmp"
@@ -141,8 +143,13 @@ func TestGetRuntime(t *testing.T) {
 	stockRuntime, ok := d.configStore.Runtimes[config.StockRuntimeName]
 	assert.Assert(t, ok, "stock runtime could not be found (test needs to be updated)")
 
-	configdOpts := *stockRuntime.ShimConfig.Opts.(*v2runcoptions.Options)
+	configdOpts := proto.Clone(stockRuntime.ShimConfig.Opts.(*v2runcoptions.Options)).(*v2runcoptions.Options)
 	configdOpts.BinaryName = configuredRuntime.Path
+	wantConfigdRuntime := configuredRuntime
+	wantConfigdRuntime.ShimConfig = &types.ShimConfig{
+		Binary: stockRuntime.ShimConfig.Binary,
+		Opts:   configdOpts,
+	}
 
 	for _, tt := range []struct {
 		name, runtime string
@@ -227,7 +234,7 @@ func TestGetRuntime(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			gotShim, gotOpts, err := d.getRuntime(tt.runtime)
 			assert.Check(t, is.Equal(gotShim, tt.wantShim))
-			assert.Check(t, is.DeepEqual(gotOpts, tt.wantOpts))
+			assert.Check(t, is.DeepEqual(gotOpts, tt.wantOpts, cmpopts.IgnoreUnexported(v2runcoptions.Options{})))
 			if tt.wantShim != "" {
 				assert.Check(t, err)
 			} else {
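Note on the test change above: containerd 1.7 regenerates its runtime options with google.golang.org/protobuf, so *v2runcoptions.Options now carries unexported bookkeeping fields. That appears to be why the plain value copy becomes proto.Clone and why is.DeepEqual needs cmpopts.IgnoreUnexported. A minimal, self-contained sketch of the same pattern (my own illustration, not moby code; it assumes the containerd 1.7 module vendored by this change, and the option values are arbitrary):

```go
package main

import (
	"fmt"

	v2runcoptions "github.com/containerd/containerd/runtime/v2/runc/options"
	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	"google.golang.org/protobuf/proto"
)

func main() {
	orig := &v2runcoptions.Options{BinaryName: "runc", SystemdCgroup: true}

	// proto.Clone makes a deep copy without touching the message's unexported
	// bookkeeping fields; a plain `*orig` value copy would copy those fields
	// too and can trip `go vet` (copylocks) on the new generated code.
	clone := proto.Clone(orig).(*v2runcoptions.Options)
	clone.BinaryName = "/usr/local/bin/runc"

	// cmp refuses to look at unexported fields unless told how to handle them;
	// IgnoreUnexported(v2runcoptions.Options{}) compares only the exported
	// (generated) fields, mirroring the assertion in the test above.
	opt := cmpopts.IgnoreUnexported(v2runcoptions.Options{})
	fmt.Println(cmp.Equal(orig, clone, opt)) // false: BinaryName differs
	clone.BinaryName = orig.BinaryName
	fmt.Println(cmp.Equal(orig, clone, opt)) // true
}
```

protocmp.Transform() from google.golang.org/protobuf/testing/protocmp is the other common way to make go-cmp protobuf-aware; the diff uses IgnoreUnexported, presumably because only a single generated type is being compared here.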
diff --git a/libcontainerd/remote/client.go b/libcontainerd/remote/client.go
index 23c008d93110f..812723f768259 100644
--- a/libcontainerd/remote/client.go
+++ b/libcontainerd/remote/client.go
@@ -28,12 +28,14 @@ import (
 	"github.com/docker/docker/libcontainerd/queue"
 	libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
 	"github.com/docker/docker/pkg/ioutils"
+	"github.com/opencontainers/go-digest"
 	v1 "github.com/opencontainers/image-spec/specs-go/v1"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
+	"google.golang.org/protobuf/proto"
 )
 
 // DockerContainerBundlePath is the label key pointing to the container's bundle path
@@ -159,7 +161,7 @@ func (c *container) Start(ctx context.Context, checkpointDir string, withStdin b
 	// remove the checkpoint when we're done
 	defer func() {
 		if checkpoint != nil {
-			err := c.client.client.ContentStore().Delete(ctx, checkpoint.Digest)
+			err := c.client.client.ContentStore().Delete(ctx, digest.Digest(checkpoint.Digest))
 			if err != nil {
 				c.client.logger.WithError(err).WithFields(logrus.Fields{
 					"ref": checkpointDir,
@@ -201,10 +203,10 @@ func (c *container) Start(ctx context.Context, checkpointDir string, withStdin b
 	if runtime.GOOS != "windows" {
 		taskOpts = append(taskOpts, func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
 			if c.v2runcoptions != nil {
-				opts := *c.v2runcoptions
+				opts := proto.Clone(c.v2runcoptions).(*v2runcoptions.Options)
 				opts.IoUid = uint32(uid)
 				opts.IoGid = uint32(gid)
-				info.Options = &opts
+				info.Options = opts
 			}
 			return nil
 		})
@@ -333,7 +335,7 @@ func (t *task) Stats(ctx context.Context) (*libcontainerdtypes.Stats, error) {
 	if err != nil {
 		return nil, err
 	}
-	return libcontainerdtypes.InterfaceToStats(m.Timestamp, v), nil
+	return libcontainerdtypes.InterfaceToStats(m.Timestamp.AsTime(), v), nil
 }
 
 func (t *task) Summary(ctx context.Context) ([]libcontainerdtypes.Summary, error) {
@@ -652,7 +654,7 @@ func (c *client) processEventStream(ctx context.Context, ns string) {
 				ProcessID: t.ID,
 				Pid:       t.Pid,
 				ExitCode:  t.ExitStatus,
-				ExitedAt:  t.ExitedAt,
+				ExitedAt:  t.ExitedAt.AsTime(),
 			}
 		case *apievents.TaskOOM:
 			et = libcontainerdtypes.EventOOM
@@ -720,8 +722,8 @@ func (c *client) writeContent(ctx context.Context, mediaType, ref string, r io.R
 	}
 	return &types.Descriptor{
 		MediaType: mediaType,
-		Digest:    writer.Digest(),
-		Size_:     size,
+		Digest:    writer.Digest().Encoded(),
+		Size:      size,
 	}, nil
 }
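The client.go hunks above follow containerd 1.7's switch from gogo/protobuf to google.golang.org/protobuf in its API types: timestamps arrive as *timestamppb.Timestamp values that need AsTime(), the Descriptor's digest is a plain string (hence the digest.Digest(...) conversion and writer.Digest().Encoded()), and the gogo-style Size_ field becomes Size. A small sketch of the conversions involved, under the assumption of the module versions vendored here (values are illustrative):

```go
package main

import (
	"fmt"
	"time"

	"github.com/opencontainers/go-digest"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// google.golang.org/protobuf timestamps: *timestamppb.Timestamp <-> time.Time.
	ts := timestamppb.New(time.Date(2023, 3, 10, 12, 0, 0, 0, time.UTC))
	fmt.Println(ts.AsTime())  // 2023-03-10 12:00:00 +0000 UTC
	fmt.Println(ts.IsValid()) // true

	// go-digest: digest.Digest is a string type, so moving between the API's
	// plain string fields and digest.Digest is a simple type conversion.
	d := digest.FromString("hello world")
	fmt.Println(d.String())                 // sha256:b94d27...
	fmt.Println(d.Algorithm(), d.Encoded()) // sha256 b94d27...
	s := string(d)
	fmt.Println(digest.Digest(s) == d) // true
}
```

digest.Digest(s) and string(d) are plain conversions; Encoded() returns only the hex portion while String() keeps the algorithm prefix, which is the distinction the writeContent change above relies on.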
diff --git a/vendor.mod b/vendor.mod
index c27c33bfef0af..21336475ae68a 100644
--- a/vendor.mod
+++ b/vendor.mod
@@ -7,14 +7,14 @@ module github.com/docker/docker
 go 1.18
 
 require (
-	cloud.google.com/go/compute v1.7.0
-	cloud.google.com/go/logging v1.4.2
+	cloud.google.com/go/compute/metadata v0.2.3
+	cloud.google.com/go/logging v1.7.0
 	code.cloudfoundry.org/clock v1.0.0
-	github.com/AdaLogics/go-fuzz-headers v0.0.0-20221118232415-3345c89a7c72
+	github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1
 	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1
 	github.com/Graylog2/go-gelf v0.0.0-20191017102106-1550ee647df0
-	github.com/Microsoft/go-winio v0.5.2
-	github.com/Microsoft/hcsshim v0.9.7
+	github.com/Microsoft/go-winio v0.6.0
+	github.com/Microsoft/hcsshim v0.10.0-rc.7
 	github.com/RackSec/srslog v0.0.0-20180709174129-a4725f04ec91
 	github.com/aws/aws-sdk-go-v2 v1.16.13
 	github.com/aws/aws-sdk-go-v2/config v1.17.4
@@ -25,11 +25,11 @@
 	github.com/bsphere/le_go v0.0.0-20200109081728-fc06dab2caa8
 	github.com/cloudflare/cfssl v0.0.0-20180323000720-5d63dbd981b5
 	github.com/containerd/cgroups/v3 v3.0.1
-	github.com/containerd/containerd v1.6.19
+	github.com/containerd/containerd v1.7.0-rc.2
 	github.com/containerd/continuity v0.3.0
 	github.com/containerd/fifo v1.1.0
 	github.com/containerd/typeurl/v2 v2.1.0
-	github.com/coreos/go-systemd/v22 v22.4.0
+	github.com/coreos/go-systemd/v22 v22.5.0
 	github.com/creack/pty v1.1.11
 	github.com/deckarep/golang-set v0.0.0-20141123011944-ef32fa3046d9
 	github.com/docker/distribution v2.8.1+incompatible
@@ -40,7 +40,7 @@
 	github.com/docker/libkv v0.2.2-0.20211217103745-e480589147e3
 	github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4
 	github.com/fluent/fluent-logger-golang v1.9.0
-	github.com/godbus/dbus/v5 v5.0.6
+	github.com/godbus/dbus/v5 v5.1.0
 	github.com/gogo/protobuf v1.3.2
 	github.com/golang/gddo v0.0.0-20190904175337-72a348e765d2
 	github.com/google/go-cmp v0.5.9
@@ -51,9 +51,9 @@
 	github.com/hashicorp/go-multierror v1.1.1
 	github.com/hashicorp/memberlist v0.4.0
 	github.com/hashicorp/serf v0.8.5
-	github.com/imdario/mergo v0.3.12
+	github.com/imdario/mergo v0.3.13
 	github.com/ishidawataru/sctp v0.0.0-20210707070123-9a39160e9062
-	github.com/klauspost/compress v1.15.12
+	github.com/klauspost/compress v1.16.0
 	github.com/miekg/dns v1.1.43
 	github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
 	github.com/moby/buildkit v0.11.4
@@ -70,10 +70,10 @@
 	github.com/moby/term v0.0.0-20221120202655-abb19827d345
 	github.com/morikuni/aec v1.0.0
 	github.com/opencontainers/go-digest v1.0.0
-	github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1
-	github.com/opencontainers/runc v1.1.3
-	github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
-	github.com/opencontainers/selinux v1.10.2
+	github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b
+	github.com/opencontainers/runc v1.1.4
+	github.com/opencontainers/runtime-spec v1.1.0-rc.1
+	github.com/opencontainers/selinux v1.11.0
 	github.com/pelletier/go-toml v1.9.5
 	github.com/pkg/errors v0.9.1
 	github.com/prometheus/client_golang v1.14.0
@@ -89,17 +89,21 @@
 	go.etcd.io/bbolt v1.3.7
 	golang.org/x/net v0.7.0
 	golang.org/x/sync v0.1.0
-	golang.org/x/sys v0.5.0
+	golang.org/x/sys v0.6.0
 	golang.org/x/text v0.7.0
 	golang.org/x/time v0.3.0
-	google.golang.org/genproto v0.0.0-20220706185917-7780775163c4
-	google.golang.org/grpc v1.50.1
+	google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4
+	google.golang.org/grpc v1.53.0
+	google.golang.org/protobuf v1.28.1
 	gotest.tools/v3 v3.4.0
 	resenje.org/singleflight v0.3.0
 )
 
 require (
-	cloud.google.com/go v0.102.1 // indirect
+	cloud.google.com/go v0.110.0 // indirect
+	cloud.google.com/go/compute v1.18.0 // indirect
+	cloud.google.com/go/longrunning v0.4.1 // indirect
+	github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20221215162035-5330a85ea652 // indirect
 	github.com/agext/levenshtein v1.2.3 // indirect
 	github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 // indirect
 	github.com/armon/go-metrics v0.4.1 // indirect
@@ -111,19 +115,19 @@
 	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.2 // indirect
 	github.com/aws/aws-sdk-go-v2/service/sts v1.16.16 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
-	github.com/cenkalti/backoff/v4 v4.1.2 // indirect
-	github.com/cespare/xxhash/v2 v2.1.2 // indirect
+	github.com/cenkalti/backoff/v4 v4.2.0 // indirect
+	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/cilium/ebpf v0.9.1 // indirect
 	github.com/container-storage-interface/spec v1.5.0 // indirect
-	github.com/containerd/cgroups v1.0.4 // indirect
+	github.com/containerd/cgroups v1.1.0 // indirect
 	github.com/containerd/console v1.0.3 // indirect
-	github.com/containerd/go-cni v1.1.6 // indirect
+	github.com/containerd/go-cni v1.1.9 // indirect
 	github.com/containerd/go-runc v1.0.0 // indirect
 	github.com/containerd/nydus-snapshotter v0.3.1 // indirect
 	github.com/containerd/stargz-snapshotter/estargz v0.13.0 // indirect
-	github.com/containerd/ttrpc v1.1.0 // indirect
+	github.com/containerd/ttrpc v1.2.0 // indirect
 	github.com/containerd/typeurl v1.0.2 // indirect
-	github.com/containernetworking/cni v1.1.1 // indirect
+	github.com/containernetworking/cni v1.1.2 // indirect
 	github.com/cyphar/filepath-securejoin v0.2.3 // indirect
 	github.com/dimchansky/utfbom v1.1.1 // indirect
 	github.com/dustin/go-humanize v1.0.0 // indirect
@@ -139,11 +143,11 @@
 	github.com/google/btree v1.1.2 // indirect
 	github.com/google/certificate-transparency-go v1.1.4 // indirect
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
-	github.com/googleapis/enterprise-certificate-proxy v0.1.0 // indirect
-	github.com/googleapis/gax-go/v2 v2.4.0 // indirect
+	github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect
+	github.com/googleapis/gax-go/v2 v2.7.0 // indirect
 	github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
-	github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-msgpack v0.5.5 // indirect
 	github.com/hashicorp/go-sockaddr v1.0.2 // indirect
@@ -170,27 +174,27 @@
 	go.etcd.io/etcd/pkg/v3 v3.5.6 // indirect
 	go.etcd.io/etcd/raft/v3 v3.5.6 // indirect
 	go.etcd.io/etcd/server/v3 v3.5.6 // indirect
-	go.opencensus.io v0.23.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0 // indirect
+	go.opencensus.io v0.24.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0 // indirect
 	go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.29.0 // indirect
 	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0 // indirect
-	go.opentelemetry.io/otel v1.4.1 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.4.1 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.4.1 // indirect
-	go.opentelemetry.io/otel/internal/metric v0.27.0 // indirect
-	go.opentelemetry.io/otel/metric v0.27.0 // indirect
-	go.opentelemetry.io/otel/sdk v1.4.1 // indirect
-	go.opentelemetry.io/otel/trace v1.4.1 // indirect
-	go.opentelemetry.io/proto/otlp v0.12.0 // indirect
+	go.opentelemetry.io/otel v1.12.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.12.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.12.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.12.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.12.0 // indirect
+	go.opentelemetry.io/otel/metric v0.34.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.12.0 // indirect
+	go.opentelemetry.io/otel/trace v1.12.0 // indirect
+	go.opentelemetry.io/proto/otlp v0.19.0 // indirect
 	go.uber.org/atomic v1.9.0 // indirect
 	go.uber.org/multierr v1.8.0 // indirect
 	go.uber.org/zap v1.21.0 // indirect
 	golang.org/x/crypto v0.2.0 // indirect
-	golang.org/x/oauth2 v0.1.0 // indirect
-	google.golang.org/api v0.93.0 // indirect
+	golang.org/x/mod v0.7.0 // indirect
+	golang.org/x/oauth2 v0.5.0 // indirect
+	golang.org/x/tools v0.5.0 // indirect
+	google.golang.org/api v0.110.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/protobuf v1.28.1 // indirect
-	k8s.io/klog/v2 v2.80.1 // indirect
+	k8s.io/klog/v2 v2.90.1 // indirect
 )
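vendor.mod now requires google.golang.org/protobuf v1.28.1 directly (it previously appeared only in the indirect block), which lines up with the daemon code above importing google.golang.org/protobuf/proto itself, and containerd moves to v1.7.0-rc.2, whose generated API types implement the new proto.Message interface. A hedged sketch of what the direct dependency is used for (my own illustration, not part of this change; field values are arbitrary):

```go
package main

import (
	"fmt"

	v2runcoptions "github.com/containerd/containerd/runtime/v2/runc/options"
	"google.golang.org/protobuf/proto"
)

// Compile-time check: the containerd 1.7 generated types satisfy the
// google.golang.org/protobuf (protoreflect-based) proto.Message interface,
// not only the old gogo one, so the new runtime can operate on them.
var _ proto.Message = (*v2runcoptions.Options)(nil)

func main() {
	in := &v2runcoptions.Options{BinaryName: "runc", IoUid: 1000, IoGid: 1000}

	// Round-trip through the wire format with the google.golang.org/protobuf
	// runtime, the same module that becomes a direct requirement above.
	raw, err := proto.Marshal(in)
	if err != nil {
		panic(err)
	}
	out := &v2runcoptions.Options{}
	if err := proto.Unmarshal(raw, out); err != nil {
		panic(err)
	}
	fmt.Println(out.BinaryName, out.IoUid, out.IoGid) // runc 1000 1000
	fmt.Println(proto.Equal(in, out))                 // true
}
```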
diff --git a/vendor.sum b/vendor.sum
index a9122513355e4..2410d46cd315e 100644
--- a/vendor.sum
+++ b/vendor.sum
@@ -21,42 +21,26 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV
 cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
 cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
 cloud.google.com/go v0.65.0/go.mod
h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= -cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= -cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= -cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= -cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= -cloud.google.com/go v0.102.1 h1:vpK6iQWv/2uUeFJth4/cBHsQAGjn1iIE6AAlxipRaA0= -cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= -cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= -cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= -cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= -cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= -cloud.google.com/go/compute v1.7.0 h1:v/k9Eueb8aAJ0vZuxKMrgm6kPhCLZU9HxFU+AFDs9Uk= -cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= +cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= 
-cloud.google.com/go/logging v1.4.2 h1:Mu2Q75VBDQlW1HlBMjTX4X84UFR73G1TiLlRYc/b7tA= -cloud.google.com/go/logging v1.4.2/go.mod h1:jco9QZSx8HiVVqLJReq7z7bVdj0P1Jb9PDFs63T+axo= +cloud.google.com/go/iam v0.12.0 h1:DRtTY29b75ciH6Ov1PHb4/iat2CLCvrOm40Q0a6DFpE= +cloud.google.com/go/logging v1.7.0 h1:CJYxlNNNNAMkHp9em/YEXcfJg+rPDg7YfwoRpMU+t5I= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -66,7 +50,6 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o= code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= code.gitea.io/sdk/gitea v0.12.0/go.mod h1:z3uwDV/b9Ls47NGukYM9XhnHtqPh/J+t40lsUrR6JDY= @@ -78,8 +61,10 @@ contrib.go.opencensus.io/resource v0.1.1/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcig dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20221118232415-3345c89a7c72 h1:kq78byqmxX6R9uk4uN3HD2F5tkZJAZMauuLSkNPS8to= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20221118232415-3345c89a7c72/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1/go.mod h1:VzwV+t+dZ9j/H867F1M2ziD+yLHtB46oM35FxxMJ4d0= +github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20221215162035-5330a85ea652 h1:+vTEFqeoeur6XSq06bs+roX3YiT49gUniJK7Zky7Xjg= +github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20221215162035-5330a85ea652/go.mod h1:OahwfttHWG6eJ0clwcfBAHoDI6X/LV/15hx/wlMZSrU= github.com/AkihiroSuda/containerd-fuse-overlayfs v1.0.0/go.mod h1:0mMDvQFeLbbn1Wy8P2j3hwFhqBq+FKn8OZPno8WLmp8= github.com/Azure/azure-amqp-common-go/v2 v2.1.0/go.mod h1:R8rea+gJRuJR6QxTir/XuEd+YuKoUiazDC/N96FiDEU= github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= @@ -100,37 +85,28 @@ github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSW github.com/Azure/go-autorest v10.15.5+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v12.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest v14.1.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest 
v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest v0.10.2/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= -github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= -github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8= github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= @@ -143,32 +119,18 @@ 
github.com/Graylog2/go-gelf v0.0.0-20191017102106-1550ee647df0/go.mod h1:fBaQWrf github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= github.com/Microsoft/go-winio v0.4.15-0.20200908182639-5b44b70ab3ab/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= github.com/Microsoft/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= -github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= -github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= -github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= -github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= +github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= github.com/Microsoft/hcsshim v0.8.10/go.mod h1:g5uw8EV2mAlzqe94tfNBNdr89fnbD/n3HV0OhsddkmM= -github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= -github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= -github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= -github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= -github.com/Microsoft/hcsshim v0.9.7 h1:mKNHW/Xvv1aFH87Jb6ERDzXTJTLPlmzfZ28VBFD/bfg= -github.com/Microsoft/hcsshim v0.9.7/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= +github.com/Microsoft/hcsshim v0.10.0-rc.7 h1:HBytQPxcv8Oy4244zbQbe6hnOnx544eL5QPUqhJldz8= +github.com/Microsoft/hcsshim v0.10.0-rc.7/go.mod h1:ILuwjA+kNW+MrN/w5un7n3mTqkwsFu4Bp05/okFUZlE= github.com/Microsoft/hcsshim/test v0.0.0-20200826032352-301c83a30e7c/go.mod h1:30A5igQ91GEmhYJF8TaRP79pMBOYynRsyOByfVV0dU4= -github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= -github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash 
v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= @@ -192,7 +154,6 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -209,7 +170,6 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aws/aws-sdk-go v1.15.90/go.mod h1:es1KtYUFs7le0xQ3rOihkuoVD90z7D0fR2Qm4S00/gU= @@ -256,12 +216,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= -github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bombsimon/wsl/v2 v2.0.0/go.mod h1:mf25kr/SqFEPhhcxW1+7pxzGlW+hIl/hYTKY95VwV8U= @@ -272,7 +230,6 @@ github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBT github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/bsphere/le_go v0.0.0-20200109081728-fc06dab2caa8 h1:fcONpniVVbh9+duVZYYbJuc+yGGdLRxTqpk7pTTz/qI= github.com/bsphere/le_go v0.0.0-20200109081728-fc06dab2caa8/go.mod 
h1:GrjfimWtH8h8EqJSfbO+sTQYV/fAjL/VN7dMeU8XP2Y= -github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= @@ -280,8 +237,8 @@ github.com/caarlos0/ctrlc v1.0.0/go.mod h1:CdXpj4rmq0q/1Eb44M9zi2nKB0QraNKuRGYGr github.com/campoy/unique v0.0.0-20180121183637-88950e537e7e/go.mod h1:9IOqJGCPMSc6E5ydlp5NIonxObaeu/Iub/X03EKPVYo= github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e/go.mod h1:oDpT4efm8tSYHXV5tHSdRvBet/b/QzxZ+XyyPehvm3A= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= -github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo= -github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4= +github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= @@ -289,19 +246,16 @@ github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5P github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= -github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= -github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= -github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/cilium/ebpf 
v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= github.com/cilium/ebpf v0.9.1 h1:64sn2K3UKw8NbP/blsixRpF3nXuyhz/VjRlRzvlBRu4= github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY= @@ -311,13 +265,11 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cloudflare/cfssl v0.0.0-20180323000720-5d63dbd981b5 h1:PqZ3bA4yzwywivzk7PBQWngJp2/PAS0bWRZerKteicY= github.com/cloudflare/cfssl v0.0.0-20180323000720-5d63dbd981b5/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E= @@ -330,138 +282,76 @@ github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58/go.mod h1:sE github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE= github.com/container-storage-interface/spec v1.5.0 h1:lvKxe3uLgqQeVQcrnL2CPQKISoKjTJxojEs9cBk+HXo= github.com/container-storage-interface/spec v1.5.0/go.mod h1:8K96oQNkJ7pFcC2R9Z1ynGGBB1I93kcS6PGg3SsOk8s= -github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= -github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= -github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= -github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= -github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= -github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= -github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= -github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= -github.com/containerd/cgroups 
v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= -github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= -github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= -github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA= -github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA= +github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= +github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= github.com/containerd/cgroups/v3 v3.0.1 h1:4hfGvu8rfGIwVIDd+nLzn/B9ZXx4BcCjzt5ToenJRaE= github.com/containerd/cgroups/v3 v3.0.1/go.mod h1:/vtwk1VXrtoa5AaZLkypuOJgA/6DyPMZHJPGQNtlHnw= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= -github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw= github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= -github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.1-0.20201117152358-0edc412565dc/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= -github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= -github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= -github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= -github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= -github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= -github.com/containerd/containerd v1.6.19 h1:F0qgQPrG0P2JPgwpxWxYavrVeXAG0ezUIB9Z/4FTUAU= -github.com/containerd/containerd v1.6.19/go.mod h1:HZCDMn4v/Xl2579/MvtOC2M206i+JJ6VxFWU/NetrGY= 
+github.com/containerd/containerd v1.7.0-rc.2 h1:Me1LnvqNQPl56LrVpVkNiQPPLomEKgZi2wKbe6ixx6w= +github.com/containerd/containerd v1.7.0-rc.2/go.mod h1:N8hwWslQgbkbPIQulOL9Qgsw5x7VWZppXQLZburjSso= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= -github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= -github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= -github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= -github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= -github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= -github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= -github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY= github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o= github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= -github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= -github.com/containerd/go-cni v1.1.6 h1:el5WPymG5nRRLQF1EfB97FWob4Tdc8INg8RZMaXWZlo= -github.com/containerd/go-cni v1.1.6/go.mod h1:BWtoWl5ghVymxu6MBjg79W9NZrCRyHIdUtk4cauMe34= +github.com/containerd/go-cni v1.1.9 h1:ORi7P1dYzCwVM6XPN4n3CbkuOx/NZ2DOqy+SHRdo9rU= +github.com/containerd/go-cni v1.1.9/go.mod h1:XYrZJ1d5W6E2VOvjffL3IZq0Dz6bsVlERHbekNK90PM= github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= github.com/containerd/go-runc v1.0.0 h1:oU+lLv1ULm5taqgV/CJivypVODI4SUz1znWjv3nNYS0= github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= -github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= -github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= -github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod 
h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= -github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= -github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= -github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= -github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/nydus-snapshotter v0.3.1 h1:b8WahTrPkt3XsabjG2o/leN4fw3HWZYr+qxo/Z8Mfzk= github.com/containerd/nydus-snapshotter v0.3.1/go.mod h1:+8R7NX7vrjlxAgtidnsstwIhpzyTlriYPssTxH++uiM= github.com/containerd/stargz-snapshotter v0.0.0-20201027054423-3a04e4c2c116/go.mod h1:o59b3PCKVAf9jjiKtCc/9hLAd+5p/rfhBfm6aBcTEr4= -github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= github.com/containerd/stargz-snapshotter/estargz v0.13.0 h1:fD7AwuVV+B40p0d9qVkH/Au1qhp8hn/HWJHIYjpEcfw= github.com/containerd/stargz-snapshotter/estargz v0.13.0/go.mod h1:m+9VaGJGlhCnrcEUod8mYumTmRgblwd3rC5UCEh2Yp0= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= -github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= -github.com/containerd/ttrpc v1.1.0 h1:GbtyLRxb0gOLR0TYQWt3O6B0NvT8tMdorEHqIQo/lWI= -github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= +github.com/containerd/ttrpc v1.2.0 h1:we4O9wzVsBA1HUVRGU8CWFsbjy2P/U2g9raVu5XXNI0= +github.com/containerd/ttrpc v1.2.0/go.mod h1:YYyNVhZrTMiaf51Vj6WhAJqJw+vl/nzABhj8pWrzle4= github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY= github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= github.com/containerd/typeurl/v2 v2.1.0 h1:yNAhJvbNEANt7ck48IlEGOxP7YAp6LLpGn5jZACDNIE= github.com/containerd/typeurl/v2 v2.1.0/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0= -github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= -github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= -github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= -github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= -github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= -github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= 
-github.com/containernetworking/cni v1.1.1 h1:ky20T7c0MvKvbMOwS/FrlbNwjEoqJEUUYfsL4b0mc4k= -github.com/containernetworking/cni v1.1.1/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= -github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= -github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= -github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= -github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= -github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ= +github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= -github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/go-systemd/v22 v22.4.0 h1:y9YHcjnjynCd/DVbg5j9L/33jQM3MxJlbj/zWskzfGU= -github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= @@ -476,10 +366,6 @@ github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/cyphar/filepath-securejoin v0.2.3 
h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= -github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= -github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= -github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -500,7 +386,6 @@ github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop github.com/docker/cli v20.10.0-beta1.0.20201029214301-1d20b15adc38+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.6.0-rc.1.0.20180327202408-83389a148052+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= @@ -513,7 +398,6 @@ github.com/docker/docker v20.10.0-beta1.0.20201110211921-af34b94a78a1+incompatib github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= @@ -544,12 +428,10 @@ github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane 
v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -562,15 +444,13 @@ github.com/fernet/fernet-go v0.0.0-20211208181803-9f70042a33ee/go.mod h1:2H9hjfb github.com/fluent/fluent-logger-golang v1.9.0 h1:zUdY44CHX2oIUc7VTNZc+4m+ORuO/mldQDA7czhWXEg= github.com/fluent/fluent-logger-golang v1.9.0/go.mod h1:2/HCT/jTy78yGyeNGQLGQsjF3zzzAuy6Xlk6FCMV5eU= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= -github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs= github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= @@ -633,27 +513,24 @@ github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2 github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro= github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= +github.com/godbus/dbus/v5 v5.1.0/go.mod 
h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/flock v0.7.3/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= github.com/gogo/googleapis v1.3.2/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= -github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= @@ -663,6 +540,8 @@ github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w github.com/golang/gddo v0.0.0-20190904175337-72a348e765d2 h1:xisWqjiKEff2B0KfFYGpCqc3M3zdTz+OHQHRc09FeYk= github.com/golang/gddo v0.0.0-20190904175337-72a348e765d2/go.mod h1:xEhNfoBDX1hzLm2Nf80qUvZ2sVwoMZ8d6IE2SrsQfh4= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -678,8 +557,6 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -700,7 +577,6 @@ github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= 
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0= @@ -736,7 +612,6 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -747,7 +622,6 @@ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-containerregistry v0.0.0-20191010200024-a3d713f9b7f8/go.mod h1:KyKXa9ciM8+lgMXwOVsXi7UxGrsf9mM61Mzs+xKUrKE= github.com/google/go-containerregistry v0.1.2/go.mod h1:GPivBPgdAyd2SU+vf6EpsgOtWDuPqjW0hJZt4rNdTZ4= -github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= @@ -760,8 +634,6 @@ github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -769,14 +641,7 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod 
h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/rpmpack v0.0.0-20191226140753-aa36bfddb3a0/go.mod h1:RaTPr0KUf2K7fnZYLNDrr8rxAamWs3iNywJLtQ2AzBg= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= @@ -785,28 +650,21 @@ github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/wire v0.3.0/go.mod h1:i1DMg/Lu8Sz5yYl25iOdmc5CT5qusaa+zmRWs16741s= github.com/google/wire v0.4.0/go.mod h1:ngWDr9Qvq3yZA10YrxfyGELY/AFWGVpy9c1LTRi1EoU= -github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= -github.com/googleapis/enterprise-certificate-proxy v0.1.0 h1:zO8WHNx/MYiAKJ3d5spxZXZE6KHmIQGQcAzwUzV7qQw= -github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= -github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= -github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= -github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= -github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR82awk= -github.com/googleapis/gax-go/v2 v2.4.0/go.mod 
h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.2.2/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= github.com/gookit/color v1.2.4/go.mod h1:AhIE+pS6D4Ql0SQWbBeXPHw7gY0/sjHoA4s/n1KB7xg= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -840,8 +698,9 @@ github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.2/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/hanwen/go-fuse v1.0.0/go.mod h1:unqXarDXqzAk0rt98O2tVndEPIpUgLD9+rwFisZH3Ok= github.com/hanwen/go-fuse/v2 v2.0.3/go.mod h1:0EQM6aH2ctVpvZ6a+onrQ/vaykxh2GH7hy3e13vzTUY= @@ -901,10 +760,8 @@ github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1: github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/in-toto/in-toto-golang v0.5.0 h1:hb8bgwr0M2hGdDsLjkJ3ZqJ8JFLL/tgYdAxF/XEFBbY= github.com/in-toto/in-toto-golang v0.5.0/go.mod h1:/Rq0IZHLV7Ku5gielPT4wPHJfH1GdHMCq8+WPxw8/BE= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -913,7 +770,6 @@ github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLf github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg= github.com/ishidawataru/sctp v0.0.0-20210707070123-9a39160e9062 h1:G1+wBT0dwjIrBdLy0MIG0i+E4CQxEnedHXdauJEIH6g= 
github.com/ishidawataru/sctp v0.0.0-20210707070123-9a39160e9062/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg= -github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= github.com/jaguilar/vt100 v0.0.0-20150826170717-2703a27b14ea/go.mod h1:QMdK4dGB3YhEW2BmA1wgGpPYI3HZy/5gD705PXKUVSg= github.com/jarcoal/httpmock v1.0.5/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= @@ -953,10 +809,9 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM= github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4= +github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -978,7 +833,6 @@ github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LE github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= @@ -1004,14 +858,11 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= -github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions 
v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= @@ -1020,7 +871,6 @@ github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= -github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= @@ -1058,7 +908,6 @@ github.com/moby/sys/mount v0.3.3/go.mod h1:PBaEorSNTLG5t/+4EgukEQVlAvVEc6ZjTySwK github.com/moby/sys/mountinfo v0.1.0/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o= github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o= github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= -github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= @@ -1066,10 +915,8 @@ github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5 github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= github.com/moby/sys/signal v0.7.0 h1:25RW3d5TnQEoKvRbEKUGay6DCQ46IxAVTT9CUMgmsSI= github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= -github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= github.com/moby/sys/symlink v0.2.0 h1:tk1rOM+Ljp0nFmfOIBtlV3rTDlWOwFRhjEeAhZB0nZc= github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs= -github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2/go.mod h1:TjQg8pa4iejrUrjiz0MCtMV38jdMNW4doKSiBrEvCQQ= github.com/moby/term v0.0.0-20221120202655-abb19827d345 h1:J9c53/kxIH+2nTKBEfZYFMlhghtHpIHSXpm5VRGHSnU= github.com/moby/term v0.0.0-20221120202655-abb19827d345/go.mod h1:15ce4BGCFxt7I5NQKT+HV0yEDxmf6fSysfEDiVo3zFM= @@ -1099,7 +946,6 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/onsi/ginkgo 
v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -1114,7 +960,6 @@ github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vv github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY= github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= -github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -1123,7 +968,6 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.20.1 h1:PA/3qinGoukvymdIDV8pii6tiZgC8kbmJO6Z5+b002Q= github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= @@ -1131,38 +975,29 @@ github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWEr github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1 h1:9iFHD5Kt9hkOfeawBNiEeEaV7bmC4/Z5wJp8E9BptMs= -github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1/go.mod h1:K/JAU0m27RFhDRX4PcFdIKntROP6y5Ed6O91aZYDQfs= +github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8= +github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= 
github.com/opencontainers/runc v1.0.0-rc10/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc92/go.mod h1:X1zlU4p7wOlX4+WRCz+hvlRv8phdL7UqbYD+vQwNMmE= -github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= -github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= -github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w= -github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= +github.com/opencontainers/runc v1.1.4 h1:nRCz/8sKg6K6jgYAFLDlXzPeITBZJyX28DBVhWD+5dg= +github.com/opencontainers/runc v1.1.4/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200728170252-4d89ac9fbff6/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.1.0-rc.1 h1:wHa9jroFfKGQqFHj0I1fMRKLl0pfj+ynAqBxo3v6u9w= +github.com/opencontainers/runtime-spec v1.1.0-rc.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= -github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= -github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= -github.com/opencontainers/selinux v1.10.2 h1:NFy2xCsjn7+WspbfZkUd5zyVeisV7VFbPSP96+8/ha4= -github.com/opencontainers/selinux v1.10.2/go.mod h1:cARutUbaUrlRClyvxOICCgKixCs6L05aUsohzA3EkHQ= +github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU= +github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= @@ -1177,7 +1012,6 @@ github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144T github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= 
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs= -github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= @@ -1244,7 +1078,6 @@ github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= @@ -1266,14 +1099,12 @@ github.com/rootless-containers/rootlesskit v1.1.0/go.mod h1:H+o9ndNe7tS91WqU0/+v github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryancurrah/gomodguard v1.0.4/go.mod h1:9T/Cfuxs5StfsocWr4WzDL36HqnX0fVb9d5fSEaLhoE= github.com/ryancurrah/gomodguard v1.1.0/go.mod h1:4O8tr7hBODaGE6VIhfJDHcwzh5GUccKSJBU0UMXJFVM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/sassoftware/go-rpmutils v0.0.0-20190420191620-a8f1baeba37b/go.mod h1:am+Fp8Bt506lA3Rk3QCmSqmYmLMnPDhdDUcosQCAx+I= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= @@ -1296,7 +1127,6 @@ github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxr github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= -github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus 
v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -1342,27 +1172,29 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.6.1/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= -github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= github.com/tdakkota/asciicheck v0.0.0-20200416190851-d7f85be797a2/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= github.com/tetafro/godot v0.3.7/go.mod h1:/7NLHhv08H1+8DNj0MElpAACw1ajsCuf3TKNQxA5S+0= @@ -1411,25 +1243,20 @@ github.com/valyala/tcplisten 
v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME= github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI= github.com/vdemeester/k8s-pkg-credentialprovider v1.17.4/go.mod h1:inCTmtUdr5KJbreVojo06krnTgaeAz/Z7lynpPk/Q2c= -github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= -github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= -github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vishvananda/netns v0.0.2 h1:Cn05BRLm+iRP/DZxyVSsfVyrzgjDbwHwkVt38qvXnNI= github.com/vishvananda/netns v0.0.2/go.mod h1:yitZXdAVI+yPFSb4QUe+VW3vOVl4PZPNcBgbPxAtJxw= github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= -github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/xanzy/go-gitlab v0.31.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug= github.com/xanzy/go-gitlab v0.32.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= @@ -1448,7 +1275,6 @@ go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.etcd.io/etcd/api/v3 v3.5.6/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8= go.etcd.io/etcd/client/pkg/v3 v3.5.6 h1:TXQWYceBKqLp4sa87rcPs11SXxUA/mHwH975v+BDvLU= go.etcd.io/etcd/client/pkg/v3 v3.5.6/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ= @@ -1460,7 +1286,6 @@ go.etcd.io/etcd/raft/v3 v3.5.6 h1:tOmx6Ym6rn2GpZOrvTGJZciJHek6RnC3U/zNInzIN50= go.etcd.io/etcd/raft/v3 
v3.5.6/go.mod h1:wL8kkRGx1Hp8FmZUuHfL3K2/OaGIDaXGr1N7i2G07J0= go.etcd.io/etcd/server/v3 v3.5.6 h1:RXuwaB8AMiV62TqcqIt4O4bG8NWjsxOkDJVT3MZI5Ds= go.etcd.io/etcd/server/v3 v3.5.6/go.mod h1:6/Gfe8XTGXQJgLYQ65oGKMfPivb2EASLUSMSWN9Sroo= -go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A= @@ -1470,53 +1295,51 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0/go.mod h1:E5NNboN0UqSAki0Atn9kVwaN7I+l25gGxDqBueo/74E= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0 h1:n9b7AAdbQtQ0k9dm0Dm2/KUcUqtG8i2O15KzNaDze8c= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0/go.mod h1:LsankqVDx4W+RhZNA5uWarULII/MBhF5qwCYxTuyXjs= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0 h1:+uFejS4DCfNH6d3xODVIGsdhzgzhh45p9gpbHQMbdZI= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0/go.mod h1:HSmzQvagH8pS2/xrK7ScWsk0vAMtRTGbMFgInXCi8Tc= go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.29.0 h1:Wjp9vsVSIEyvdiaECfqxY9xBqQ7JaSCGtvHgR4doXZk= go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.29.0/go.mod h1:vHItvsnJtp7ES++nFLLFBzUWny7fJQSvTlxFcqQGUr4= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0 h1:SLme4Porm+UwX0DdHMxlwRt7FzPSE0sys81bet2o0pU= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0/go.mod h1:tLYsuf2v8fZreBVwp9gVMhefZlLFZaUiNVSq8QxXRII= go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU= go.opentelemetry.io/otel v1.4.0/go.mod h1:jeAqMFKy2uLIxCtKxoFj0FAL5zAPKQagc3+GtBWakzk= -go.opentelemetry.io/otel v1.4.1 h1:QbINgGDDcoQUoMJa2mMaWno49lja9sHwp6aoa2n3a4g= -go.opentelemetry.io/otel v1.4.1/go.mod h1:StM6F/0fSwpd8dKWDCdRr7uRvEPYdW0hBSlbdTiUde4= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1 h1:imIM3vRDMyZK1ypQlQlO+brE22I9lRhJsBDXpDWjlz8= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= +go.opentelemetry.io/otel v1.12.0 h1:IgfC7kqQrRccIKuB7Cl+SRUmsKbEwSGPr0Eu+/ht1SQ= +go.opentelemetry.io/otel v1.12.0/go.mod h1:geaoz0L0r1BEOR81k7/n9W4TCXYCJ7bPO7K374jQHG0= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.12.0 h1:UfDENi+LTcLjQ/JhaXimjlIgn7wWjwbEMmdREm2Gyng= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.12.0/go.mod h1:rqbht/LlhVBgn5+k3M5QK96K5Xb0DvXpMJ5SFQpY6uw= go.opentelemetry.io/otel/exporters/otlp/otlptrace 
v1.0.1/go.mod h1:Kv8liBeVNFkkkbilbgWRpV+wWuu+H5xdOT6HAgd30iw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1 h1:WPpPsAAs8I2rA47v5u0558meKmmwm1Dj99ZbqCV8sZ8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1/go.mod h1:o5RW5o2pKpJLD5dNTCmjF1DorYwMeFJmb/rKr5sLaa8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.12.0 h1:ZVqtSAxrR4+ofzayuww0/EKamCjjnwnXTMRZzMudJoU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.12.0/go.mod h1:IlaGLENJkAl9+Xoo3J0unkdOwtL+rmqZ3ryMjUtYA94= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1/go.mod h1:xOvWoTOrQjxjW61xtOmD/WKGRYb/P4NzRo3bs65U6Rk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.4.1 h1:AxqDiGk8CorEXStMDZF5Hz9vo9Z7ZZ+I5m8JRl/ko40= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.4.1/go.mod h1:c6E4V3/U+miqjs/8l950wggHGL1qzlp0Ypj9xoGrPqo= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.4.1 h1:8qOago/OqoFclMUUj/184tZyRdDZFpcejSjbk5Jrl6Y= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.4.1/go.mod h1:VwYo0Hak6Efuy0TXsZs8o1hnV3dHDPNtDbycG0hI8+M= -go.opentelemetry.io/otel/internal/metric v0.27.0 h1:9dAVGAfFiiEq5NVB9FUJ5et+btbDQAUIJehJ+ikyryk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.12.0 h1:+tsVdWosoqDfX6cdHAeacZozjQS94ySBd+aUXFwnNKA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.12.0/go.mod h1:jSqjV+Knu1Jyvh+l3fx7V210Ev3HHgNQAi8YqpXaQP8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.12.0 h1:L23MzcHDznr05xOM1Ng1F98L0nVd7hm/S7y2jW9IRB4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.12.0/go.mod h1:C+onYX2j5QH653b3wGJwowYr8jLMjBJw35QcaCQQK0U= go.opentelemetry.io/otel/internal/metric v0.27.0/go.mod h1:n1CVxRqKqYZtqyTh9U/onvKapPGv7y/rpyOTI+LFNzw= -go.opentelemetry.io/otel/metric v0.27.0 h1:HhJPsGhJoKRSegPQILFbODU56NS/L1UE4fS1sC5kIwQ= go.opentelemetry.io/otel/metric v0.27.0/go.mod h1:raXDJ7uP2/Jc0nVZWQjJtzoyssOYWu/+pjZqRzfvZ7g= +go.opentelemetry.io/otel/metric v0.34.0 h1:MCPoQxcg/26EuuJwpYN1mZTeCYAUGx8ABxfW07YkjP8= +go.opentelemetry.io/otel/metric v0.34.0/go.mod h1:ZFuI4yQGNCupurTXCwkeD/zHBt+C2bR7bw5JqUm/AP8= go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI= -go.opentelemetry.io/otel/sdk v1.4.1 h1:J7EaW71E0v87qflB4cDolaqq3AcujGrtyIPGQoZOB0Y= -go.opentelemetry.io/otel/sdk v1.4.1/go.mod h1:NBwHDgDIBYjwK2WNu1OPgsIc2IJzmBXNnvIJxJc8BpE= +go.opentelemetry.io/otel/sdk v1.12.0 h1:8npliVYV7qc0t1FKdpU08eMnOjgPFMnriPhn0HH4q3o= +go.opentelemetry.io/otel/sdk v1.12.0/go.mod h1:WYcvtgquYvgODEvxOry5owO2y9MyciW7JqMz6cpXShE= go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk= go.opentelemetry.io/otel/trace v1.4.0/go.mod h1:uc3eRsqDfWs9R7b92xbQbU42/eTNz4N+gLP8qJCi4aE= -go.opentelemetry.io/otel/trace v1.4.1 h1:O+16qcdTrT7zxv2J6GejTPFinSwA++cYerC5iSiF8EQ= -go.opentelemetry.io/otel/trace v1.4.1/go.mod h1:iYEVbroFCNut9QkwEczV9vMRPHNKSSwYZjulEtsmhFc= +go.opentelemetry.io/otel/trace v1.12.0 h1:p28in++7Kd0r2d8gSt931O57fdjUyWxkVbESuILAeUc= +go.opentelemetry.io/otel/trace v1.12.0/go.mod h1:pHlgBynn6s25qJ2szD+Bv+iwKJttjHSI3lUAyf0GNuQ= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= -go.opentelemetry.io/proto/otlp v0.12.0 h1:CMJ/3Wp7iOWES+CYLfnBv+DVmPbB+kmy9PJ92XvlR6c= -go.opentelemetry.io/proto/otlp v0.12.0/go.mod 
h1:TsIjwGWIx5VFYv9KGVlOpxoBl5Dy+63SUguV7GGvlSQ= +go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= -go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= @@ -1530,7 +1353,6 @@ gocloud.dev v0.19.0/go.mod h1:SmKwiR8YwIMMJvQBKLsC3fHNyMwXLw3PMDO+VVteJMI= golang.org/x/build v0.0.0-20190314133821-5284462c4bec/go.mod h1:atTaCNAy0f16Ah5aV1gMSwgiKVHwu/JncqDpuRr7lS4= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1540,17 +1362,13 @@ golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod 
h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.2.0 h1:BRXPfhNivWL5Yq0BGQ39a2sW6t44aODpfxkWjYdzewE= golang.org/x/crypto v0.2.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= @@ -1580,7 +1398,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -1590,15 +1407,14 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1640,29 +1456,17 @@ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180724155351-3d292e4d0cdc/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1675,25 +1479,11 @@ golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 
v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= -golang.org/x/oauth2 v0.1.0 h1:isLCZuhj4v+tYv7eskaN4v/TM+A1begWWgyVJDdl1+Y= -golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A= +golang.org/x/oauth2 v0.5.0 h1:HuArIo48skDwlrvM3sEdHXElYslAMsf3KwRkkW4MC4s= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1706,7 +1496,6 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1730,7 +1519,6 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1740,7 +1528,6 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1778,69 +1565,37 @@ golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200917073148-efd3b9a0ff20/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201013081832-0aaa2718063a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1851,7 +1606,6 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= @@ -1862,7 +1616,6 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1933,37 +1686,25 @@ golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200502202811-ed308ab3e770/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= 
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.5.0 h1:+bSpV5HIeWkuvgaMfI3UmKRThoTA5ODJTUd8T17NO+4= +golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= -golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib 
v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= @@ -1993,32 +1734,8 @@ google.golang.org/api v0.25.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= -google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= -google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= -google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= -google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= -google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= -google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= -google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= -google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= -google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= -google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= -google.golang.org/api v0.93.0 h1:T2xt9gi0gHdxdnRkVQhT8mIvPaXKNsDNWz+L696M66M= -google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.110.0 h1:l+rh0KYUooe9JGbGVx71tbFo4SMbMTXK3I3ia2QSEeU= +google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2039,7 +1756,6 @@ google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= @@ -2070,61 +1786,10 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210517163617-5e0236093d7a/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod 
h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= -google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= -google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= -google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod 
h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= -google.golang.org/genproto v0.0.0-20220706185917-7780775163c4 h1:7YDGQC/0sigNGzsEWyb9s72jTxlFdwVEYNJHbfQ+Dtg= -google.golang.org/genproto v0.0.0-20220706185917-7780775163c4/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= @@ -2135,7 +1800,6 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= @@ -2144,30 +1808,16 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= -google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= 
-google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY= -google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -2181,7 +1831,6 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= @@ -2203,8 +1852,6 @@ gopkg.in/ini.v1 v1.56.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= @@ -2221,11 +1868,11 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.0.3/go.mod 
h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= @@ -2242,60 +1889,36 @@ honnef.co/go/tools v0.0.1-2020.1.5/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 k8s.io/api v0.0.0-20180904230853-4e7be11eab3f/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= k8s.io/api v0.17.4/go.mod h1:5qxx6vjmwUVG2nHQTKGlLts8Tbok8PzHl4vHtVFuZCA= k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= -k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= -k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= -k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= k8s.io/apimachinery v0.0.0-20180904193909-def12e63c512/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= k8s.io/apimachinery v0.17.4/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g= k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= -k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= -k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= k8s.io/apiserver v0.17.4/go.mod h1:5ZDQ6Xr5MNBxyi3iUZXS84QOhZl+W7Oq2us/29c0j9I= -k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= -k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= -k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= k8s.io/client-go v0.0.0-20180910083459-2cefa64ff137/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= k8s.io/client-go v0.17.4/go.mod h1:ouF6o5pz3is8qU0/qYL2RnoxOPqgfuidYLowytyLJmc= k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU= -k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= -k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= -k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= k8s.io/cloud-provider v0.17.4/go.mod h1:XEjKDzfD+b9MTLXQFlDGkk6Ho8SGMpaU8Uugx/KNK9U= k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= -k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= k8s.io/component-base v0.17.4/go.mod h1:5BRqHMbbQPm2kKu35v3G+CpVq4K0RJKC7TRioF0I9lE= -k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= -k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= -k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= -k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= -k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= -k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= k8s.io/csi-translation-lib v0.17.4/go.mod h1:CsxmjwxEI0tTNMzffIAcgR9lX4wOh6AKHdxQrT7L0oo= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= 
-k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= -k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= +k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= -k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kubernetes v1.11.10/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/legacy-cloud-providers v0.17.4/go.mod h1:FikRNoD64ECjkxO36gkDgJeiQWwyZTuBkhu+yxOc1Js= k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= @@ -2311,13 +1934,9 @@ resenje.org/singleflight v0.3.0/go.mod h1:lAgQK7VfjG6/pgredbQfmV0RvG/uVhKo6vSuZ0 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml 
v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/vendor/cloud.google.com/go/.release-please-manifest-individual.json b/vendor/cloud.google.com/go/.release-please-manifest-individual.json new file mode 100644 index 0000000000000..3a9f0082017f7 --- /dev/null +++ b/vendor/cloud.google.com/go/.release-please-manifest-individual.json @@ -0,0 +1,13 @@ +{ + "bigquery": "1.46.0", + "bigtable": "1.18.1", + "datastore": "1.10.0", + "errorreporting": "0.3.0", + "firestore": "1.9.0", + "logging": "1.6.1", + "profiler": "0.3.1", + "pubsub": "1.28.0", + "pubsublite": "1.6.0", + "spanner": "1.44.0", + "storage": "1.29.0" +} diff --git a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json index 00b99da4a04cc..50a4d12bca0d0 100644 --- a/vendor/cloud.google.com/go/.release-please-manifest-submodules.json +++ b/vendor/cloud.google.com/go/.release-please-manifest-submodules.json @@ -1,107 +1,116 @@ { - "accessapproval": "1.3.0", - "accesscontextmanager": "1.2.0", - "aiplatform": "1.13.0", - "analytics": "0.7.0", - "apigateway": "1.2.0", - "apigeeconnect": "1.2.0", - "appengine": "1.3.0", - "area120": "0.3.0", - "artifactregistry": "1.3.0", - "asset": "1.2.0", - "assuredworkloads": "0.6.0", - "automl": "1.3.0", - "baremetalsolution": "0.1.0", - "batch": "0.1.0", - "billing": "1.2.0", - "binaryauthorization": "0.6.0", - "certificatemanager": "0.2.0", - "channel": "1.6.0", - "cloudbuild": "1.2.0", - "clouddms": "1.2.0", - "cloudtasks": "1.3.0", - "compute": "1.7.0", - "contactcenterinsights": "1.2.0", - "container": "1.2.0", - "containeranalysis": "0.3.0", - "datacatalog": "1.3.0", - "dataflow": "0.4.0", - "datafusion": "1.3.0", - "datalabeling": "0.3.0", - "dataplex": "0.4.0", - "dataproc": "1.5.0", - "dataqna": "0.3.0", - "datastream": "0.5.0", - "deploy": "1.2.0", - "dialogflow": "1.10.0", - "dlp": "1.4.0", - "documentai": "1.4.0", - "domains": "0.4.0", - "essentialcontacts": "1.2.0", - "eventarc": "1.6.0", - "filestore": "1.2.0", - "functions": "1.4.0", - "gaming": "1.2.0", - "gkebackup": "0.1.0", - "gkeconnect": "0.3.0", - "gkehub": "0.7.0", - "gkemulticloud": "0.2.0", + "accessapproval": "1.6.0", + "accesscontextmanager": "1.6.0", + "aiplatform": "1.34.0", + "analytics": "0.17.0", + "apigateway": "1.5.0", + "apigeeconnect": "1.5.0", + "apigeeregistry": "0.3.0", + "apikeys": "0.3.0", + "appengine": "1.6.0", + "area120": "0.7.0", + "artifactregistry": "1.11.0", + "asset": "1.11.1", + "assuredworkloads": "1.10.0", + "automl": "1.12.0", + "baremetalsolution": "0.5.0", + "batch": "0.7.0", + "beyondcorp": "0.4.0", + "billing": "1.12.0", + "binaryauthorization": "1.5.0", + "certificatemanager": "1.6.0", + "channel": "1.11.0", + "cloudbuild": "1.6.0", + "clouddms": "1.5.0", + "cloudtasks": "1.9.0", + "compute": "1.18.0", + "compute/metadata": "0.2.3", + "contactcenterinsights": "1.6.0", + "container": "1.13.1", + "containeranalysis": "0.7.0", + "datacatalog": "1.12.0", + "dataflow": "0.8.0", + "dataform": "0.6.0", + "datafusion": "1.6.0", + "datalabeling": "0.7.0", + "dataplex": "1.5.2", + "dataproc": "1.12.0", + "dataqna": "0.7.0", + "datastream": "1.6.0", + "deploy": "1.6.0", + "dialogflow": "1.27.0", + "dlp": "1.9.0", + "documentai": "1.15.0", + "domains": "0.8.0", + "edgecontainer": "0.3.0", + "essentialcontacts": "1.5.0", + "eventarc": "1.10.0", + "filestore": "1.5.0", + "functions": "1.10.0", + "gaming": "1.9.0", + "gkebackup": "0.4.0", + "gkeconnect": "0.7.0", + "gkehub": "0.11.0", + 
"gkemulticloud": "0.5.0", "grafeas": "0.2.0", - "gsuiteaddons": "1.2.0", - "iam": "0.3.0", - "iap": "1.3.0", - "ids": "0.3.0", - "iot": "1.2.0", - "kms": "1.4.0", - "language": "1.2.0", - "lifesciences": "0.3.0", - "managedidentities": "1.2.0", - "mediatranslation": "0.3.0", - "memcache": "1.2.0", - "metastore": "1.2.0", - "monitoring": "1.5.0", - "networkconnectivity": "1.2.0", - "networkmanagement": "1.2.0", - "networksecurity": "0.3.0", - "notebooks": "0.4.0", - "optimization": "0.1.0", - "orchestration": "1.2.0", - "orgpolicy": "1.3.0", - "osconfig": "1.5.0", - "oslogin": "1.2.0", - "phishingprotection": "0.3.0", - "policytroubleshooter": "1.2.0", - "privatecatalog": "0.3.0", - "recaptchaenterprise/v2": "2.0.1", - "recommendationengine": "0.2.0", - "recommender": "1.3.0", - "redis": "1.5.0", - "resourcemanager": "1.2.0", - "resourcesettings": "1.2.0", - "retail": "1.4.0", - "run": "0.1.1", - "scheduler": "1.2.0", - "secretmanager": "1.4.0", - "security": "1.4.0", - "securitycenter": "1.8.0", - "servicecontrol": "1.3.0", - "servicedirectory": "1.2.0", - "servicemanagement": "1.3.0", - "serviceusage": "1.2.0", - "shell": "1.2.0", - "speech": "1.4.0", - "storagetransfer": "1.3.0", - "talent": "0.8.0", - "texttospeech": "1.3.0", - "tpu": "1.2.0", - "trace": "1.2.0", - "translate": "1.2.0", - "video": "1.6.0", - "videointelligence": "1.2.0", - "vision/v2": "2.0.0", - "vmmigration": "0.3.0", - "vpcaccess": "1.2.0", - "webrisk": "1.2.0", - "websecurityscanner": "1.2.0", - "workflows": "1.4.0" + "gsuiteaddons": "1.5.0", + "iam": "0.10.0", + "iap": "1.6.0", + "ids": "1.3.0", + "iot": "1.5.0", + "kms": "1.8.0", + "language": "1.9.0", + "lifesciences": "0.8.0", + "longrunning": "0.4.1", + "managedidentities": "1.5.0", + "maps": "0.6.0", + "mediatranslation": "0.7.0", + "memcache": "1.9.0", + "metastore": "1.10.0", + "monitoring": "1.12.0", + "networkconnectivity": "1.10.0", + "networkmanagement": "1.6.0", + "networksecurity": "0.7.0", + "notebooks": "1.7.0", + "optimization": "1.3.1", + "orchestration": "1.6.0", + "orgpolicy": "1.10.0", + "osconfig": "1.11.0", + "oslogin": "1.9.0", + "phishingprotection": "0.7.0", + "policytroubleshooter": "1.5.0", + "privatecatalog": "0.7.0", + "recaptchaenterprise/v2": "2.6.0", + "recommendationengine": "0.7.0", + "recommender": "1.9.0", + "redis": "1.11.0", + "resourcemanager": "1.5.0", + "resourcesettings": "1.5.0", + "retail": "1.12.0", + "run": "0.8.0", + "scheduler": "1.8.0", + "secretmanager": "1.10.0", + "security": "1.12.0", + "securitycenter": "1.18.1", + "servicecontrol": "1.10.0", + "servicedirectory": "1.8.0", + "servicemanagement": "1.6.0", + "serviceusage": "1.5.0", + "shell": "1.6.0", + "speech": "1.14.1", + "storagetransfer": "1.7.0", + "talent": "1.5.0", + "texttospeech": "1.6.0", + "tpu": "1.5.0", + "trace": "1.8.0", + "translate": "1.5.0", + "video": "1.12.0", + "videointelligence": "1.10.0", + "vision/v2": "2.6.0", + "vmmigration": "1.5.0", + "vmwareengine": "0.2.2", + "vpcaccess": "1.6.0", + "webrisk": "1.8.0", + "websecurityscanner": "1.5.0", + "workflows": "1.10.0" } diff --git a/vendor/cloud.google.com/go/.release-please-manifest.json b/vendor/cloud.google.com/go/.release-please-manifest.json index 52eec6a30755e..d0779411557d0 100644 --- a/vendor/cloud.google.com/go/.release-please-manifest.json +++ b/vendor/cloud.google.com/go/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.102.1" + ".": "0.110.0" } diff --git a/vendor/cloud.google.com/go/CHANGES.md b/vendor/cloud.google.com/go/CHANGES.md index 0ced42279c60b..34209ce0c1d18 
100644 --- a/vendor/cloud.google.com/go/CHANGES.md +++ b/vendor/cloud.google.com/go/CHANGES.md @@ -1,5 +1,67 @@ # Changes +## [0.110.0](https://github.com/googleapis/google-cloud-go/compare/v0.109.0...v0.110.0) (2023-02-15) + + +### Features + +* **internal/postprocessor:** Detect and initialize new modules ([#7288](https://github.com/googleapis/google-cloud-go/issues/7288)) ([59ce02c](https://github.com/googleapis/google-cloud-go/commit/59ce02c13f265741a8f1f0f7ad5109bf83e3df82)) +* **internal/postprocessor:** Only regen snippets for changed modules ([#7300](https://github.com/googleapis/google-cloud-go/issues/7300)) ([220f8a5](https://github.com/googleapis/google-cloud-go/commit/220f8a5ad2fd64b75c5a1af531b1ab4597cf17d7)) + + +### Bug Fixes + +* **internal/postprocessor:** Add scopes without OwlBot api-name feature ([#7404](https://github.com/googleapis/google-cloud-go/issues/7404)) ([f7fe4f6](https://github.com/googleapis/google-cloud-go/commit/f7fe4f68ebf2ca28efd282f3419329dd2c09d245)) +* **internal/postprocessor:** Include module and package in scope ([#7294](https://github.com/googleapis/google-cloud-go/issues/7294)) ([d2c5c84](https://github.com/googleapis/google-cloud-go/commit/d2c5c8449f6939301f0fd506282e8fc73fc84f96)) + +## [0.109.0](https://github.com/googleapis/google-cloud-go/compare/v0.108.0...v0.109.0) (2023-01-18) + + +### Features + +* **internal/postprocessor:** Make OwlBot postprocessor ([#7202](https://github.com/googleapis/google-cloud-go/issues/7202)) ([7a1022e](https://github.com/googleapis/google-cloud-go/commit/7a1022e215261d679c8496cdd35a9cad1f13e527)) + +## [0.108.0](https://github.com/googleapis/google-cloud-go/compare/v0.107.0...v0.108.0) (2023-01-05) + + +### Features + +* **all:** Enable REGAPIC and REST numeric enums ([#6999](https://github.com/googleapis/google-cloud-go/issues/6999)) ([28f3572](https://github.com/googleapis/google-cloud-go/commit/28f3572addb0f563a2a42a76977b4e083191613f)) +* **debugger:** Add REST client ([06a54a1](https://github.com/googleapis/google-cloud-go/commit/06a54a16a5866cce966547c51e203b9e09a25bc0)) + + +### Bug Fixes + +* **internal/gapicgen:** Disable rest for non-rest APIs ([#7157](https://github.com/googleapis/google-cloud-go/issues/7157)) ([ab332ce](https://github.com/googleapis/google-cloud-go/commit/ab332ced06f6c07909444e4528c02a8b6a0a70a6)) + +## [0.107.0](https://github.com/googleapis/google-cloud-go/compare/v0.106.0...v0.107.0) (2022-11-15) + + +### Features + +* **routing:** Start generating apiv2 ([#7011](https://github.com/googleapis/google-cloud-go/issues/7011)) ([66e8e27](https://github.com/googleapis/google-cloud-go/commit/66e8e2717b2593f4e5640ecb97344bb1d5e5fc0b)) + +## [0.106.0](https://github.com/googleapis/google-cloud-go/compare/v0.105.0...v0.106.0) (2022-11-09) + + +### Features + +* **debugger:** rewrite signatures in terms of new location ([3c4b2b3](https://github.com/googleapis/google-cloud-go/commit/3c4b2b34565795537aac1661e6af2442437e34ad)) + +## [0.104.0](https://github.com/googleapis/google-cloud-go/compare/v0.103.0...v0.104.0) (2022-08-24) + + +### Features + +* **godocfx:** add friendlyAPIName ([#6447](https://github.com/googleapis/google-cloud-go/issues/6447)) ([c6d3ba4](https://github.com/googleapis/google-cloud-go/commit/c6d3ba401b7b3ae9b710a8850c6ec5d49c4c1490)) + +## [0.103.0](https://github.com/googleapis/google-cloud-go/compare/v0.102.1...v0.103.0) (2022-06-29) + + +### Features + +* **privateca:** temporarily remove REGAPIC support 
([199b725](https://github.com/googleapis/google-cloud-go/commit/199b7250f474b1a6f53dcf0aac0c2966f4987b68)) + ## [0.102.1](https://github.com/googleapis/google-cloud-go/compare/v0.102.0...v0.102.1) (2022-06-17) diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md index 669cc7532702b..6ce222dccac6c 100644 --- a/vendor/cloud.google.com/go/README.md +++ b/vendor/cloud.google.com/go/README.md @@ -35,10 +35,10 @@ For an updated list of all of our released APIs please see our Our libraries are compatible with at least the three most recent, major Go releases. They are currently compatible with: +- Go 1.20 +- Go 1.19 - Go 1.18 - Go 1.17 -- Go 1.16 -- Go 1.15 ## Authorization diff --git a/vendor/cloud.google.com/go/compute/internal/version.go b/vendor/cloud.google.com/go/compute/internal/version.go new file mode 100644 index 0000000000000..ddddbd21f21f1 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/internal/version.go @@ -0,0 +1,18 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +// Version is the current tagged release of the library. +const Version = "1.18.0" diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md new file mode 100644 index 0000000000000..06b957349afd5 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -0,0 +1,19 @@ +# Changes + +## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.2...compute/metadata/v0.2.3) (2022-12-15) + + +### Bug Fixes + +* **compute/metadata:** Switch DNS lookup to an absolute lookup ([119b410](https://github.com/googleapis/google-cloud-go/commit/119b41060c7895e45e48aee5621ad35607c4d021)), refs [#7165](https://github.com/googleapis/google-cloud-go/issues/7165) + +## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.1...compute/metadata/v0.2.2) (2022-12-01) + + +### Bug Fixes + +* **compute/metadata:** Set IdleConnTimeout for http.Client ([#7084](https://github.com/googleapis/google-cloud-go/issues/7084)) ([766516a](https://github.com/googleapis/google-cloud-go/commit/766516aaf3816bfb3159efeea65aa3d1d205a3e2)), refs [#5430](https://github.com/googleapis/google-cloud-go/issues/5430) + +## [0.1.0] (2022-10-26) + +Initial release of metadata being it's own module. diff --git a/vendor/cloud.google.com/go/compute/metadata/LICENSE b/vendor/cloud.google.com/go/compute/metadata/LICENSE new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
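The appendix above describes attaching the boilerplate notice in the comment syntax of the target file format. As a minimal sketch (not part of the vendored license text itself), this is how the same notice typically sits at the top of a Go source file in this diff, with the bracketed year and owner left as the appendix's placeholders:

```go
// Copyright [yyyy] [name of copyright owner]
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package example is a placeholder showing where the notice sits
// relative to the package clause.
package example
```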
diff --git a/vendor/cloud.google.com/go/compute/metadata/README.md b/vendor/cloud.google.com/go/compute/metadata/README.md new file mode 100644 index 0000000000000..f940fb2c85b83 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/README.md @@ -0,0 +1,27 @@ +# Compute API + +[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/compute.svg)](https://pkg.go.dev/cloud.google.com/go/compute/metadata) + +This is a utility library for communicating with Google Cloud metadata service +on Google Cloud. + +## Install + +```bash +go get cloud.google.com/go/compute/metadata +``` + +## Go Version Support + +See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported) +section in the root directory's README. + +## Contributing + +Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) +document for details. + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. See +[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index 1405d09674685..c17faa142a44f 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -70,7 +70,9 @@ func newDefaultHTTPClient() *http.Client { Timeout: 2 * time.Second, KeepAlive: 30 * time.Second, }).Dial, + IdleConnTimeout: 60 * time.Second, }, + Timeout: 5 * time.Second, } } @@ -145,7 +147,7 @@ func testOnGCE() bool { go func() { resolver := &net.Resolver{} - addrs, err := resolver.LookupHost(ctx, "metadata.google.internal") + addrs, err := resolver.LookupHost(ctx, "metadata.google.internal.") if err != nil || len(addrs) == 0 { resc <- false return diff --git a/vendor/cloud.google.com/go/compute/metadata/tidyfix.go b/vendor/cloud.google.com/go/compute/metadata/tidyfix.go new file mode 100644 index 0000000000000..4cef48500817c --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/tidyfix.go @@ -0,0 +1,23 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the {{.RootMod}} import, won't actually become part of +// the resultant binary. +//go:build modhack +// +build modhack + +package metadata + +// Necessary for safely adding multi-module repo. 
See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "cloud.google.com/go/compute/internal" diff --git a/vendor/cloud.google.com/go/debug.md b/vendor/cloud.google.com/go/debug.md new file mode 100644 index 0000000000000..f2a608ce24d77 --- /dev/null +++ b/vendor/cloud.google.com/go/debug.md @@ -0,0 +1,162 @@ +# Debugging tips and tricks + +While working with the Go Client libraries you may run into some situations +where you need a deeper level of understanding about what is going on in order +to solve your problem. Here are some tips and tricks that you can use in these +cases. *Note* that many of the tips in this document will have a performance +impact and are therefore not recommended for sustained production use. Use these +tips locally or in production for a *limited time* to help get a better +understanding of what is going on. + +## HTTP based clients + +All of our auto-generated clients have a constructor to create a client that +uses HTTP/JSON instead of gRPC. Additionally a couple of our hand-written +clients like Storage and Bigquery are also HTTP based. Here are some tips for +debugging these clients. + +### Try setting Go's HTTP debug variable + +Try setting the following environment variable for verbose Go HTTP logging: +GODEBUG=http2debug=1. To read more about this feature please see the godoc for +[net/http](https://pkg.go.dev/net/http). + +*WARNING*: Enabling this debug variable will log headers and payloads which may +contain private information. + +### Add in your own logging with an HTTP middleware + +You may want to add in your own logging around HTTP requests. One way to do this +is to register a custom HTTP client with a logging transport built in. Here is +an example of how you would do this with the storage client. + +*WARNING*: Adding this middleware will log headers and payloads which may +contain private information. + +```go +package main + +import ( + "context" + "fmt" + "log" + "net/http" + "net/http/httputil" + + "cloud.google.com/go/storage" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + htransport "google.golang.org/api/transport/http" +) + +type loggingRoundTripper struct { + rt http.RoundTripper +} + +func (d loggingRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { + // Will create a dump of the request and body. + dump, err := httputil.DumpRequest(r, true) + if err != nil { + log.Println("error dumping request") + } + log.Printf("%s", dump) + return d.rt.RoundTrip(r) +} + +func main() { + ctx := context.Background() + + // Create a transport with authentication built-in detected with + // [ADC](https://google.aip.dev/auth/4110). Note you will have to pass any + // required scoped for the client you are using. + trans, err := htransport.NewTransport(ctx, + http.DefaultTransport, + option.WithScopes(storage.ScopeFullControl), + ) + if err != nil { + log.Fatal(err) + } + + // Embed customized transport into an HTTP client. + hc := &http.Client{ + Transport: loggingRoundTripper{rt: trans}, + } + + // Supply custom HTTP client for use by the library. + client, err := storage.NewClient(ctx, option.WithHTTPClient(hc)) + if err != nil { + log.Fatal(err) + } + defer client.Close() + // Use the client +} +``` + +## gRPC based clients + +### Try setting grpc-go's debug variables + +Try setting the following environment variables for grpc-go: +`GRPC_GO_LOG_VERBOSITY_LEVEL=99` `GRPC_GO_LOG_SEVERITY_LEVEL=info`. 
These are +good for diagnosing connection level failures. For more information please see +[grpc-go's debug documentation](https://pkg.go.dev/google.golang.org/grpc/examples/features/debugging#section-readme). + +### Add in your own logging with a gRPC interceptors + +You may want to add in your own logging around gRPC requests. One way to do this +is to register a custom interceptor that adds logging. Here is +an example of how you would do this with the secretmanager client. Note this +example registers a UnaryClientInterceptor but you may want/need to register +a StreamClientInterceptor instead-of/as-well depending on what kinds of +RPCs you are calling. + +*WARNING*: Adding this interceptor will log metadata and payloads which may +contain private information. + +```go +package main + +import ( + "context" + "log" + + secretmanager "cloud.google.com/go/secretmanager/apiv1" + "google.golang.org/api/option" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/reflect/protoreflect" +) + +func loggingUnaryInterceptor() grpc.UnaryClientInterceptor { + return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + err := invoker(ctx, method, req, reply, cc, opts...) + log.Printf("Invoked method: %v", method) + md, ok := metadata.FromOutgoingContext(ctx) + if ok { + log.Println("Metadata:") + for k, v := range md { + log.Printf("Key: %v, Value: %v", k, v) + } + } + reqb, merr := protojson.Marshal(req.(protoreflect.ProtoMessage)) + if merr == nil { + log.Printf("Request: %s", reqb) + } + return err + } +} + +func main() { + ctx := context.Background() + // Supply custom gRPC interceptor for use by the client. + client, err := secretmanager.NewClient(ctx, + option.WithGRPCDialOption(grpc.WithUnaryInterceptor(loggingUnaryInterceptor())), + ) + if err != nil { + log.Fatal(err) + } + defer client.Close() + // Use the client +} +``` diff --git a/vendor/cloud.google.com/go/doc.go b/vendor/cloud.google.com/go/doc.go index 06463833e3e1d..833878ec8f313 100644 --- a/vendor/cloud.google.com/go/doc.go +++ b/vendor/cloud.google.com/go/doc.go @@ -17,14 +17,30 @@ Package cloud is the root of the packages used to access Google Cloud Services. See https://godoc.org/cloud.google.com/go for a full list of sub-packages. - -Client Options +# Client Options All clients in sub-packages are configurable via client options. These options are described here: https://godoc.org/google.golang.org/api/option. +## Endpoint Override + +Endpoint configuration is used to specify the URL to which requests are +sent. It is used for services that support or require regional endpoints, as well +as for other use cases such as [testing against fake +servers](https://github.com/googleapis/google-cloud-go/blob/main/testing.md#testing-grpc-services-using-fakes). + +For example, the Vertex AI service recommends that you configure the endpoint to the +location with the features you want that is closest to your physical location or the +location of your users. There is no global endpoint for Vertex AI. See +[Vertex AI - Locations](https://cloud.google.com/vertex-ai/docs/general/locations) +for more details. 
The following example demonstrates configuring a Vertex AI client +with a regional endpoint: -Authentication and Authorization + ctx := context.Background() + endpoint := "us-central1-aiplatform.googleapis.com:443" + client, err := aiplatform.NewDatasetClient(ctx, option.WithEndpoint(endpoint)) + +# Authentication and Authorization All the clients in sub-packages support authentication via Google Application Default Credentials (see https://cloud.google.com/docs/authentication/production), or @@ -35,11 +51,12 @@ and authenticate clients. For information on how to create and obtain Application Default Credentials, see https://cloud.google.com/docs/authentication/production. Here is an example of a client using ADC to authenticate: - client, err := secretmanager.NewClient(context.Background()) - if err != nil { - // TODO: handle error. - } - _ = client // Use the client. + + client, err := secretmanager.NewClient(context.Background()) + if err != nil { + // TODO: handle error. + } + _ = client // Use the client. You can use a file with credentials to authenticate and authorize, such as a JSON key file associated with a Google service account. Service Account keys can be @@ -47,12 +64,13 @@ created and downloaded from https://console.cloud.google.com/iam-admin/serviceaccounts. This example uses the Secret Manger client, but the same steps apply to the other client libraries underneath this package. Example: - client, err := secretmanager.NewClient(context.Background(), - option.WithCredentialsFile("/path/to/service-account-key.json")) - if err != nil { - // TODO: handle error. - } - _ = client // Use the client. + + client, err := secretmanager.NewClient(context.Background(), + option.WithCredentialsFile("/path/to/service-account-key.json")) + if err != nil { + // TODO: handle error. + } + _ = client // Use the client. In some cases (for instance, you don't want to store secrets on disk), you can create credentials from in-memory JSON and use the WithCredentials option. @@ -62,19 +80,19 @@ the other client libraries underneath this package. Note that scopes can be found at https://developers.google.com/identity/protocols/oauth2/scopes, and are also provided in all auto-generated libraries: for example, cloud.google.com/go/secretmanager/apiv1 provides DefaultAuthScopes. Example: - ctx := context.Background() - creds, err := google.CredentialsFromJSON(ctx, []byte("JSON creds"), secretmanager.DefaultAuthScopes()...) - if err != nil { - // TODO: handle error. - } - client, err := secretmanager.NewClient(ctx, option.WithCredentials(creds)) - if err != nil { - // TODO: handle error. - } - _ = client // Use the client. + ctx := context.Background() + creds, err := google.CredentialsFromJSON(ctx, []byte("JSON creds"), secretmanager.DefaultAuthScopes()...) + if err != nil { + // TODO: handle error. + } + client, err := secretmanager.NewClient(ctx, option.WithCredentials(creds)) + if err != nil { + // TODO: handle error. + } + _ = client // Use the client. -Timeouts and Cancellation +# Timeouts and Cancellation By default, non-streaming methods, like Create or Get, will have a default deadline applied to the context provided at call time, unless a context deadline is already set. Streaming @@ -83,40 +101,42 @@ arrange for cancellation, use contexts. Transient errors will be retried when correctness allows. 
Here is an example of how to set a timeout for an RPC, use context.WithTimeout: - ctx := context.Background() - // Do not set a timeout on the context passed to NewClient: dialing happens - // asynchronously, and the context is used to refresh credentials in the - // background. - client, err := secretmanager.NewClient(ctx) - if err != nil { - // TODO: handle error. - } - // Time out if it takes more than 10 seconds to create a dataset. - tctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() // Always call cancel. - - req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/project-id/secrets/name"} - if err := client.DeleteSecret(tctx, req); err != nil { - // TODO: handle error. - } + + ctx := context.Background() + // Do not set a timeout on the context passed to NewClient: dialing happens + // asynchronously, and the context is used to refresh credentials in the + // background. + client, err := secretmanager.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + // Time out if it takes more than 10 seconds to create a dataset. + tctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() // Always call cancel. + + req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/project-id/secrets/name"} + if err := client.DeleteSecret(tctx, req); err != nil { + // TODO: handle error. + } Here is an example of how to arrange for an RPC to be canceled, use context.WithCancel: - ctx := context.Background() - // Do not cancel the context passed to NewClient: dialing happens asynchronously, - // and the context is used to refresh credentials in the background. - client, err := secretmanager.NewClient(ctx) - if err != nil { - // TODO: handle error. - } - cctx, cancel := context.WithCancel(ctx) - defer cancel() // Always call cancel. - - // TODO: Make the cancel function available to whatever might want to cancel the - // call--perhaps a GUI button. - req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/proj/secrets/name"} - if err := client.DeleteSecret(cctx, req); err != nil { - // TODO: handle error. - } + + ctx := context.Background() + // Do not cancel the context passed to NewClient: dialing happens asynchronously, + // and the context is used to refresh credentials in the background. + client, err := secretmanager.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + cctx, cancel := context.WithCancel(ctx) + defer cancel() // Always call cancel. + + // TODO: Make the cancel function available to whatever might want to cancel the + // call--perhaps a GUI button. + req := &secretmanagerpb.DeleteSecretRequest{Name: "projects/proj/secrets/name"} + if err := client.DeleteSecret(cctx, req); err != nil { + // TODO: handle error. + } To opt out of default deadlines, set the temporary environment variable GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE to "true" prior to client @@ -130,8 +150,7 @@ timeout on the context passed to NewClient. Dialing is non-blocking, so timeouts would be ineffective and would only interfere with credential refreshing, which uses the same context. - -Connection Pooling +# Connection Pooling Connection pooling differs in clients based on their transport. Cloud clients either rely on HTTP or gRPC transports to communicate @@ -147,63 +166,65 @@ of cloud client libraries may specify option.WithGRPCConnectionPool(n) as a clie option to NewClient calls. This configures the underlying gRPC connections to be pooled and addressed in a round robin fashion. 
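As a quick illustration of the gRPC connection pooling option mentioned above, here is a minimal sketch, reusing the secretmanager client from the earlier examples, of passing option.WithGRPCConnectionPool at construction time; the pool size of 4 is an arbitrary illustration, not a recommendation:

```go
package main

import (
	"context"
	"log"

	secretmanager "cloud.google.com/go/secretmanager/apiv1"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()
	// Ask the transport layer for four gRPC connections; requests are
	// addressed across them in a round robin fashion.
	client, err := secretmanager.NewClient(ctx, option.WithGRPCConnectionPool(4))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	// Use the client.
}
```

Because the pool is fixed at dial time, this option only has an effect when passed to NewClient; it cannot be changed on an existing client.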
- -Using the Libraries with Docker +# Using the Libraries with Docker Minimal docker images like Alpine lack CA certificates. This causes RPCs to appear to hang, because gRPC retries indefinitely. See https://github.com/googleapis/google-cloud-go/issues/928 for more information. - -Debugging +# Debugging To see gRPC logs, set the environment variable GRPC_GO_LOG_SEVERITY_LEVEL. See https://godoc.org/google.golang.org/grpc/grpclog for more information. For HTTP logging, set the GODEBUG environment variable to "http2debug=1" or "http2debug=2". - -Inspecting errors +# Inspecting errors Most of the errors returned by the generated clients are wrapped in an -`apierror.APIError` (https://pkg.go.dev/github.com/googleapis/gax-go/v2/apierror) -and can be further unwrapped into a `grpc.Status` or `googleapi.Error` depending +[github.com/googleapis/gax-go/v2/apierror.APIError] and can be further unwrapped +into a [google.golang.org/grpc/status.Status] or +[google.golang.org/api/googleapi.Error] depending on the transport used to make the call (gRPC or REST). Converting your errors to these types can be a useful way to get more information about what went wrong while debugging. -`apierror.APIError` gives access to specific details in the -error. The transport-specific errors can still be unwrapped using the -`apierror.APIError`. - if err != nil { - var ae *apierror.APIError - if errors.As(err, &ae) { - log.Println(ae.Reason()) - log.Println(ae.Details().Help.GetLinks()) - } - } - -If the gRPC transport was used, the `grpc.Status` can still be parsed using the -`status.FromError` function. - if err != nil { - if s, ok := status.FromError(err); ok { - log.Println(s.Message()) - for _, d := range s.Proto().Details { - log.Println(d) - } - } - } - -If the REST transport was used, the `googleapi.Error` can be parsed in a similar -way. - if err != nil { - var gerr *googleapi.Error - if errors.As(err, &gerr) { - log.Println(gerr.Message) - } - } - -Client Stability +[github.com/googleapis/gax-go/v2/apierror.APIError] gives access to specific +details in the error. The transport-specific errors can still be unwrapped using +the [github.com/googleapis/gax-go/v2/apierror.APIError]. + + if err != nil { + var ae *apierror.APIError + if errors.As(err, &ae) { + log.Println(ae.Reason()) + log.Println(ae.Details().Help.GetLinks()) + } + } + +If the gRPC transport was used, the [google.golang.org/grpc/status.Status] can +still be parsed using the [google.golang.org/grpc/status.FromError] function. + + if err != nil { + if s, ok := status.FromError(err); ok { + log.Println(s.Message()) + for _, d := range s.Proto().Details { + log.Println(d) + } + } + } + +If the REST transport was used, the [google.golang.org/api/googleapi.Error] can +be parsed in a similar way, allowing access to details such as the HTTP response +code. + + if err != nil { + var gerr *googleapi.Error + if errors.As(err, &gerr) { + log.Println(gerr.Message) + } + } + +# Client Stability Clients in this repository are considered alpha or beta unless otherwise marked as stable in the README.md. 
Semver is not used to communicate stability diff --git a/vendor/cloud.google.com/go/internal/version/update_version.sh b/vendor/cloud.google.com/go/internal/version/update_version.sh deleted file mode 100644 index d7c5a3e219c95..0000000000000 --- a/vendor/cloud.google.com/go/internal/version/update_version.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -# Copyright 2019 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -today=$(date +%Y%m%d) - -sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'$today'"/' $GOFILE - diff --git a/vendor/cloud.google.com/go/internal/version/version.go b/vendor/cloud.google.com/go/internal/version/version.go deleted file mode 100644 index fd9dd91e985a6..0000000000000 --- a/vendor/cloud.google.com/go/internal/version/version.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:generate ./update_version.sh - -// Package version contains version information for Google Cloud Client -// Libraries for Go, as reported in request headers. -package version - -import ( - "runtime" - "strings" - "unicode" -) - -// Repo is the current version of the client libraries in this -// repo. It should be a date in YYYYMMDD format. -const Repo = "20201104" - -// Go returns the Go runtime version. The returned string -// has no whitespace. 
-func Go() string { - return goVersion -} - -var goVersion = goVer(runtime.Version()) - -const develPrefix = "devel +" - -func goVer(s string) string { - if strings.HasPrefix(s, develPrefix) { - s = s[len(develPrefix):] - if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { - s = s[:p] - } - return s - } - - if strings.HasPrefix(s, "go1") { - s = s[2:] - var prerelease string - if p := strings.IndexFunc(s, notSemverRune); p >= 0 { - s, prerelease = s[:p], s[p:] - } - if strings.HasSuffix(s, ".") { - s += "0" - } else if strings.Count(s, ".") < 2 { - s += ".0" - } - if prerelease != "" { - s += "-" + prerelease - } - return s - } - return "" -} - -func notSemverRune(r rune) bool { - return !strings.ContainsRune("0123456789.", r) -} diff --git a/vendor/cloud.google.com/go/logging/CHANGES.md b/vendor/cloud.google.com/go/logging/CHANGES.md index c326f28f7ca25..a5c037c1c95c7 100644 --- a/vendor/cloud.google.com/go/logging/CHANGES.md +++ b/vendor/cloud.google.com/go/logging/CHANGES.md @@ -1,5 +1,54 @@ # Changes +## [1.7.0](https://github.com/googleapis/google-cloud-go/compare/logging/v1.6.1...logging/v1.7.0) (2023-02-27) + + +### Features + +* **logging:** Add (*Logger). StandardLoggerFromTemplate() method. ([#7261](https://github.com/googleapis/google-cloud-go/issues/7261)) ([533ecbb](https://github.com/googleapis/google-cloud-go/commit/533ecbb19a2833e667ad139a6604fd40dfb43cdc)) +* **logging:** Add REST client ([06a54a1](https://github.com/googleapis/google-cloud-go/commit/06a54a16a5866cce966547c51e203b9e09a25bc0)) +* **logging:** Rewrite signatures and type in terms of new location ([620e6d8](https://github.com/googleapis/google-cloud-go/commit/620e6d828ad8641663ae351bfccfe46281e817ad)) + + +### Bug Fixes + +* **logging:** Correctly populate SourceLocation when logging via (*Logger).StandardLogger ([#7320](https://github.com/googleapis/google-cloud-go/issues/7320)) ([1a0bd13](https://github.com/googleapis/google-cloud-go/commit/1a0bd13b88569826f4ee6528e9cdb59fd26914fa)) +* **logging:** Fix typo in README.md ([#7297](https://github.com/googleapis/google-cloud-go/issues/7297)) ([82aa2ee](https://github.com/googleapis/google-cloud-go/commit/82aa2ee9381f793bd731f1b6789fc18e4b671bd7)) + +## [1.6.1](https://github.com/googleapis/google-cloud-go/compare/logging/v1.6.0...logging/v1.6.1) (2022-12-02) + + +### Bug Fixes + +* **logging:** downgrade some dependencies ([7540152](https://github.com/googleapis/google-cloud-go/commit/754015236d5af7c82a75da218b71a87b9ead6eb5)) + +## [1.6.0](https://github.com/googleapis/google-cloud-go/compare/logging/v1.5.0...logging/v1.6.0) (2022-11-29) + + +### Features + +* **logging:** start generating proto stubs ([0eb700d](https://github.com/googleapis/google-cloud-go/commit/0eb700d17c4cac56f59038f0f3ae5a65257a3d38)) + + +### Bug Fixes + +* **logging:** Fix stdout log http request format ([#7083](https://github.com/googleapis/google-cloud-go/issues/7083)) ([2894e66](https://github.com/googleapis/google-cloud-go/commit/2894e66be7ff7536f725ede453d1834586a361bd)) + +## [1.5.0](https://github.com/googleapis/google-cloud-go/compare/logging/v1.4.2...logging/v1.5.0) (2022-06-25) + + +### Features + +* **logging:** add better version metadata to calls ([d1ad921](https://github.com/googleapis/google-cloud-go/commit/d1ad921d0322e7ce728ca9d255a3cf0437d26add)) +* **logging:** set versionClient to module version ([55f0d92](https://github.com/googleapis/google-cloud-go/commit/55f0d92bf112f14b024b4ab0076c9875a17423c9)) +* **logging:** support structured logging functionality 
([#6029](https://github.com/googleapis/google-cloud-go/issues/6029)) ([56f4cdd](https://github.com/googleapis/google-cloud-go/commit/56f4cdd066cc9eaeece2c6fb466d58c3e7c41563)) +* **logging:** Update Logging API with latest changes ([5af548b](https://github.com/googleapis/google-cloud-go/commit/5af548bee4ffde279727b2e1ad9b072925106a74)) + + +### Bug Fixes + +* **logging:** remove instance_name resource label ([#5461](https://github.com/googleapis/google-cloud-go/issues/5461)) ([115385f](https://github.com/googleapis/google-cloud-go/commit/115385f066ee54cf35a093749bc2673a17b3fa08)) + ### [1.4.2](https://www.github.com/googleapis/google-cloud-go/compare/logging/v1.4.1...logging/v1.4.2) (2021-05-20) diff --git a/vendor/cloud.google.com/go/logging/README.md b/vendor/cloud.google.com/go/logging/README.md index b9957e5a365d6..d1fb9ec285933 100644 --- a/vendor/cloud.google.com/go/logging/README.md +++ b/vendor/cloud.google.com/go/logging/README.md @@ -3,8 +3,9 @@ - [About Cloud Logging](https://cloud.google.com/logging/) - [API documentation](https://cloud.google.com/logging/docs) - [Go client documentation](https://pkg.go.dev/cloud.google.com/go/logging) -- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/logging) +- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/main/logging) +For an interactive tutorial on using the client library in a Go application, click [Guide Me](https://console.cloud.google.com/?walkthrough_id=logging__logging-go). ### Example Usage First create a `logging.Client` to use throughout your application: @@ -27,12 +28,38 @@ logger := client.Logger("my-log") logger.Log(logging.Entry{Payload: "something happened!"}) ``` -Close your client before your program exits, to flush any buffered log entries. +If you need to write a critical log entry use synchronous ingestion method. [snip]:# (logging-3) +```go +logger := client.Logger("my-log") +logger.LogSync(context.Background(), logging.Entry{Payload: "something happened!"}) +``` + +Close your client before your program exits, to flush any buffered log entries. +[snip]:# (logging-4) + ```go err = client.Close() if err != nil { // TODO: Handle error. } ``` + +### Logger configuration options + +Creating a Logger using `logging.Logger` accept configuration [LoggerOption](loggeroption.go#L25) arguments. The following options are supported: + +| Configuration option | Arguments | Description | +| -------------------- | --------- | ----------- | +| CommonLabels | `map[string]string` | The set of labels that will be ingested for all log entries ingested by Logger. | +| ConcurrentWriteLimit | `int` | Number of parallel goroutine the Logger will use to ingest logs asynchronously. High number of routines may exhaust API quota. The default is 1. | +| DelayThreshold | `time.Duration` | Maximum time a log entry is buffered on client before being ingested. The default is 1 second. | +| EntryCountThreshold | `int` | Maximum number of log entries to be buffered on client before being ingested. The default is 1000. | +| EntryByteThreshold | `int` | Maximum size in bytes of log entries to be buffered on client before being ingested. The default is 8MiB. | +| EntryByteLimit | `int` | Maximum size in bytes of the single write call to ingest log entries. If EntryByteLimit is smaller than EntryByteThreshold, the latter has no effect. The default is zero, meaning there is no limit. 
| +| BufferedByteLimit | `int` | Maximum number of bytes that the Logger will keep in memory before returning ErrOverflow. This option limits the total memory consumption of the Logger (but note that each Logger has its own, separate limit). It is possible to reach BufferedByteLimit even if it is larger than EntryByteThreshold or EntryByteLimit, because calls triggered by the latter two options may be enqueued (and hence occupying memory) while new log entries are being added. | +| ContextFunc | `func() (ctx context.Context, afterCall func())` | Callback function to be called to obtain `context.Context` during async log ingestion. | +| SourceLocationPopulation | One of `logging.DoNotPopulateSourceLocation`, `logging.PopulateSourceLocationForDebugEntries` or `logging.AlwaysPopulateSourceLocation` | Controls auto-population of the logging.Entry.SourceLocation field when ingesting log entries. Allows to disable population of source location info, allowing it only for log entries at Debug severity or enable it for all log entries. Enabling it for all entries may result in degradation in performance. Use `logging_test.BenchmarkSourceLocationPopulation` to test performance with and without the option. The default is set to `logging.DoNotPopulateSourceLocation`. | +| PartialSuccess | | Make each write call to Logging service with [partialSuccess flag](https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/write#body.request_body.FIELDS.partial_success) set. The default is to make calls without setting the flag. | +| RedirectAsJSON | `io.Writer` | Converts log entries to Jsonified one line string according to the [structured logging format](https://cloud.google.com/logging/docs/structured-logging#special-payload-fields) and writes it to provided `io.Writer`. Users should use this option with `os.Stdout` and `os.Stderr` to leverage the out-of-process ingestion of logs using logging agents that are deployed in Cloud Logging environments. | diff --git a/vendor/cloud.google.com/go/logging/apiv2/README.md b/vendor/cloud.google.com/go/logging/apiv2/README.md index d2d9a176e6671..64efb249bb8bb 100644 --- a/vendor/cloud.google.com/go/logging/apiv2/README.md +++ b/vendor/cloud.google.com/go/logging/apiv2/README.md @@ -6,6 +6,4 @@ This package includes auto-generated clients for the logging v2 API. Use the handwritten logging client (in the parent directory, cloud.google.com/go/logging) in preference to this. -This code is EXPERIMENTAL and subject to CHANGE AT ANY TIME. - diff --git a/vendor/cloud.google.com/go/logging/apiv2/config_client.go b/vendor/cloud.google.com/go/logging/apiv2/config_client.go index e80e55711082f..64072dabf8b7e 100644 --- a/vendor/cloud.google.com/go/logging/apiv2/config_client.go +++ b/vendor/cloud.google.com/go/logging/apiv2/config_client.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
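Returning briefly to the Logger configuration options table above: a minimal sketch of passing a couple of those LoggerOption values (DelayThreshold and ConcurrentWriteLimit) when creating a Logger. The "my-log" name comes from the README's own examples; "my-project" and the specific option values are placeholders for illustration only:

```go
package main

import (
	"context"
	"log"
	"time"

	"cloud.google.com/go/logging"
)

func main() {
	ctx := context.Background()
	client, err := logging.NewClient(ctx, "my-project") // placeholder project ID
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close() // Close flushes any buffered entries.

	// Buffer entries for at most 500ms and ingest with two parallel goroutines.
	logger := client.Logger("my-log",
		logging.DelayThreshold(500*time.Millisecond),
		logging.ConcurrentWriteLimit(2),
	)
	logger.Log(logging.Entry{Payload: "something happened!"})
}
```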
@@ -23,16 +23,19 @@ import ( "net/url" "time" - "github.com/golang/protobuf/proto" + loggingpb "cloud.google.com/go/logging/apiv2/loggingpb" + "cloud.google.com/go/longrunning" + lroauto "cloud.google.com/go/longrunning/autogen" gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/option/internaloption" gtransport "google.golang.org/api/transport/grpc" - loggingpb "google.golang.org/genproto/googleapis/logging/v2" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" ) var newConfigClientHook clientHook @@ -62,15 +65,18 @@ type ConfigCallOptions struct { DeleteExclusion []gax.CallOption GetCmekSettings []gax.CallOption UpdateCmekSettings []gax.CallOption + GetSettings []gax.CallOption + UpdateSettings []gax.CallOption + CopyLogEntries []gax.CallOption } -func defaultConfigClientOptions() []option.ClientOption { +func defaultConfigGRPCClientOptions() []option.ClientOption { return []option.ClientOption{ internaloption.WithDefaultEndpoint("logging.googleapis.com:443"), internaloption.WithDefaultMTLSEndpoint("logging.mtls.googleapis.com:443"), internaloption.WithDefaultAudience("https://logging.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), - option.WithGRPCDialOption(grpc.WithDisableServiceConfig()), + internaloption.EnableJwtWithScope(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -185,35 +191,334 @@ func defaultConfigCallOptions() *ConfigCallOptions { }, GetCmekSettings: []gax.CallOption{}, UpdateCmekSettings: []gax.CallOption{}, + GetSettings: []gax.CallOption{}, + UpdateSettings: []gax.CallOption{}, + CopyLogEntries: []gax.CallOption{}, } } +// internalConfigClient is an interface that defines the methods available from Cloud Logging API. 
+type internalConfigClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + ListBuckets(context.Context, *loggingpb.ListBucketsRequest, ...gax.CallOption) *LogBucketIterator + GetBucket(context.Context, *loggingpb.GetBucketRequest, ...gax.CallOption) (*loggingpb.LogBucket, error) + CreateBucket(context.Context, *loggingpb.CreateBucketRequest, ...gax.CallOption) (*loggingpb.LogBucket, error) + UpdateBucket(context.Context, *loggingpb.UpdateBucketRequest, ...gax.CallOption) (*loggingpb.LogBucket, error) + DeleteBucket(context.Context, *loggingpb.DeleteBucketRequest, ...gax.CallOption) error + UndeleteBucket(context.Context, *loggingpb.UndeleteBucketRequest, ...gax.CallOption) error + ListViews(context.Context, *loggingpb.ListViewsRequest, ...gax.CallOption) *LogViewIterator + GetView(context.Context, *loggingpb.GetViewRequest, ...gax.CallOption) (*loggingpb.LogView, error) + CreateView(context.Context, *loggingpb.CreateViewRequest, ...gax.CallOption) (*loggingpb.LogView, error) + UpdateView(context.Context, *loggingpb.UpdateViewRequest, ...gax.CallOption) (*loggingpb.LogView, error) + DeleteView(context.Context, *loggingpb.DeleteViewRequest, ...gax.CallOption) error + ListSinks(context.Context, *loggingpb.ListSinksRequest, ...gax.CallOption) *LogSinkIterator + GetSink(context.Context, *loggingpb.GetSinkRequest, ...gax.CallOption) (*loggingpb.LogSink, error) + CreateSink(context.Context, *loggingpb.CreateSinkRequest, ...gax.CallOption) (*loggingpb.LogSink, error) + UpdateSink(context.Context, *loggingpb.UpdateSinkRequest, ...gax.CallOption) (*loggingpb.LogSink, error) + DeleteSink(context.Context, *loggingpb.DeleteSinkRequest, ...gax.CallOption) error + ListExclusions(context.Context, *loggingpb.ListExclusionsRequest, ...gax.CallOption) *LogExclusionIterator + GetExclusion(context.Context, *loggingpb.GetExclusionRequest, ...gax.CallOption) (*loggingpb.LogExclusion, error) + CreateExclusion(context.Context, *loggingpb.CreateExclusionRequest, ...gax.CallOption) (*loggingpb.LogExclusion, error) + UpdateExclusion(context.Context, *loggingpb.UpdateExclusionRequest, ...gax.CallOption) (*loggingpb.LogExclusion, error) + DeleteExclusion(context.Context, *loggingpb.DeleteExclusionRequest, ...gax.CallOption) error + GetCmekSettings(context.Context, *loggingpb.GetCmekSettingsRequest, ...gax.CallOption) (*loggingpb.CmekSettings, error) + UpdateCmekSettings(context.Context, *loggingpb.UpdateCmekSettingsRequest, ...gax.CallOption) (*loggingpb.CmekSettings, error) + GetSettings(context.Context, *loggingpb.GetSettingsRequest, ...gax.CallOption) (*loggingpb.Settings, error) + UpdateSettings(context.Context, *loggingpb.UpdateSettingsRequest, ...gax.CallOption) (*loggingpb.Settings, error) + CopyLogEntries(context.Context, *loggingpb.CopyLogEntriesRequest, ...gax.CallOption) (*CopyLogEntriesOperation, error) + CopyLogEntriesOperation(name string) *CopyLogEntriesOperation +} + // ConfigClient is a client for interacting with Cloud Logging API. -// // Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// Service for configuring sinks used to route log entries. type ConfigClient struct { + // The internal transport-dependent client. + internalClient internalConfigClient + + // The call options for this service. + CallOptions *ConfigCallOptions + + // LROClient is used internally to handle long-running operations. + // It is exposed so that its CallOptions can be modified if required. 
+ // Users should not Close this client. + LROClient *lroauto.OperationsClient +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ConfigClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ConfigClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *ConfigClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// ListBuckets lists log buckets. +func (c *ConfigClient) ListBuckets(ctx context.Context, req *loggingpb.ListBucketsRequest, opts ...gax.CallOption) *LogBucketIterator { + return c.internalClient.ListBuckets(ctx, req, opts...) +} + +// GetBucket gets a log bucket. +func (c *ConfigClient) GetBucket(ctx context.Context, req *loggingpb.GetBucketRequest, opts ...gax.CallOption) (*loggingpb.LogBucket, error) { + return c.internalClient.GetBucket(ctx, req, opts...) +} + +// CreateBucket creates a log bucket that can be used to store log entries. After a bucket +// has been created, the bucket’s location cannot be changed. +func (c *ConfigClient) CreateBucket(ctx context.Context, req *loggingpb.CreateBucketRequest, opts ...gax.CallOption) (*loggingpb.LogBucket, error) { + return c.internalClient.CreateBucket(ctx, req, opts...) +} + +// UpdateBucket updates a log bucket. This method replaces the following fields in the +// existing bucket with values from the new bucket: retention_period +// +// If the retention period is decreased and the bucket is locked, +// FAILED_PRECONDITION will be returned. +// +// If the bucket has a lifecycle_state of DELETE_REQUESTED, then +// FAILED_PRECONDITION will be returned. +// +// After a bucket has been created, the bucket’s location cannot be changed. +func (c *ConfigClient) UpdateBucket(ctx context.Context, req *loggingpb.UpdateBucketRequest, opts ...gax.CallOption) (*loggingpb.LogBucket, error) { + return c.internalClient.UpdateBucket(ctx, req, opts...) +} + +// DeleteBucket deletes a log bucket. +// +// Changes the bucket’s lifecycle_state to the DELETE_REQUESTED state. +// After 7 days, the bucket will be purged and all log entries in the bucket +// will be permanently deleted. +func (c *ConfigClient) DeleteBucket(ctx context.Context, req *loggingpb.DeleteBucketRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteBucket(ctx, req, opts...) +} + +// UndeleteBucket undeletes a log bucket. A bucket that has been deleted can be undeleted +// within the grace period of 7 days. +func (c *ConfigClient) UndeleteBucket(ctx context.Context, req *loggingpb.UndeleteBucketRequest, opts ...gax.CallOption) error { + return c.internalClient.UndeleteBucket(ctx, req, opts...) +} + +// ListViews lists views on a log bucket. +func (c *ConfigClient) ListViews(ctx context.Context, req *loggingpb.ListViewsRequest, opts ...gax.CallOption) *LogViewIterator { + return c.internalClient.ListViews(ctx, req, opts...) 
+} + +// GetView gets a view on a log bucket… +func (c *ConfigClient) GetView(ctx context.Context, req *loggingpb.GetViewRequest, opts ...gax.CallOption) (*loggingpb.LogView, error) { + return c.internalClient.GetView(ctx, req, opts...) +} + +// CreateView creates a view over log entries in a log bucket. A bucket may contain a +// maximum of 30 views. +func (c *ConfigClient) CreateView(ctx context.Context, req *loggingpb.CreateViewRequest, opts ...gax.CallOption) (*loggingpb.LogView, error) { + return c.internalClient.CreateView(ctx, req, opts...) +} + +// UpdateView updates a view on a log bucket. This method replaces the following fields +// in the existing view with values from the new view: filter. +// If an UNAVAILABLE error is returned, this indicates that system is not in +// a state where it can update the view. If this occurs, please try again in a +// few minutes. +func (c *ConfigClient) UpdateView(ctx context.Context, req *loggingpb.UpdateViewRequest, opts ...gax.CallOption) (*loggingpb.LogView, error) { + return c.internalClient.UpdateView(ctx, req, opts...) +} + +// DeleteView deletes a view on a log bucket. +// If an UNAVAILABLE error is returned, this indicates that system is not in +// a state where it can delete the view. If this occurs, please try again in a +// few minutes. +func (c *ConfigClient) DeleteView(ctx context.Context, req *loggingpb.DeleteViewRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteView(ctx, req, opts...) +} + +// ListSinks lists sinks. +func (c *ConfigClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRequest, opts ...gax.CallOption) *LogSinkIterator { + return c.internalClient.ListSinks(ctx, req, opts...) +} + +// GetSink gets a sink. +func (c *ConfigClient) GetSink(ctx context.Context, req *loggingpb.GetSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) { + return c.internalClient.GetSink(ctx, req, opts...) +} + +// CreateSink creates a sink that exports specified log entries to a destination. The +// export of newly-ingested log entries begins immediately, unless the sink’s +// writer_identity is not permitted to write to the destination. A sink can +// export log entries only from the resource owning the sink. +func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) { + return c.internalClient.CreateSink(ctx, req, opts...) +} + +// UpdateSink updates a sink. This method replaces the following fields in the existing +// sink with values from the new sink: destination, and filter. +// +// The updated sink might also have a new writer_identity; see the +// unique_writer_identity field. +func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) { + return c.internalClient.UpdateSink(ctx, req, opts...) +} + +// DeleteSink deletes a sink. If the sink has a unique writer_identity, then that +// service account is also deleted. +func (c *ConfigClient) DeleteSink(ctx context.Context, req *loggingpb.DeleteSinkRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteSink(ctx, req, opts...) +} + +// ListExclusions lists all the exclusions on the _Default sink in a parent resource. +func (c *ConfigClient) ListExclusions(ctx context.Context, req *loggingpb.ListExclusionsRequest, opts ...gax.CallOption) *LogExclusionIterator { + return c.internalClient.ListExclusions(ctx, req, opts...) 
+} + +// GetExclusion gets the description of an exclusion in the _Default sink. +func (c *ConfigClient) GetExclusion(ctx context.Context, req *loggingpb.GetExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) { + return c.internalClient.GetExclusion(ctx, req, opts...) +} + +// CreateExclusion creates a new exclusion in the _Default sink in a specified parent +// resource. Only log entries belonging to that resource can be excluded. You +// can have up to 10 exclusions in a resource. +func (c *ConfigClient) CreateExclusion(ctx context.Context, req *loggingpb.CreateExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) { + return c.internalClient.CreateExclusion(ctx, req, opts...) +} + +// UpdateExclusion changes one or more properties of an existing exclusion in the _Default +// sink. +func (c *ConfigClient) UpdateExclusion(ctx context.Context, req *loggingpb.UpdateExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) { + return c.internalClient.UpdateExclusion(ctx, req, opts...) +} + +// DeleteExclusion deletes an exclusion in the _Default sink. +func (c *ConfigClient) DeleteExclusion(ctx context.Context, req *loggingpb.DeleteExclusionRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteExclusion(ctx, req, opts...) +} + +// GetCmekSettings gets the Logging CMEK settings for the given resource. +// +// Note: CMEK for the Log Router can be configured for Google Cloud projects, +// folders, organizations and billing accounts. Once configured for an +// organization, it applies to all projects and folders in the Google Cloud +// organization. +// +// See Enabling CMEK for Log +// Router (at https://cloud.google.com/logging/docs/routing/managed-encryption) +// for more information. +func (c *ConfigClient) GetCmekSettings(ctx context.Context, req *loggingpb.GetCmekSettingsRequest, opts ...gax.CallOption) (*loggingpb.CmekSettings, error) { + return c.internalClient.GetCmekSettings(ctx, req, opts...) +} + +// UpdateCmekSettings updates the Log Router CMEK settings for the given resource. +// +// Note: CMEK for the Log Router can currently only be configured for Google +// Cloud organizations. Once configured, it applies to all projects and +// folders in the Google Cloud organization. +// +// UpdateCmekSettings +// will fail if 1) kms_key_name is invalid, or 2) the associated service +// account does not have the required +// roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, or +// 3) access to the key is disabled. +// +// See Enabling CMEK for Log +// Router (at https://cloud.google.com/logging/docs/routing/managed-encryption) +// for more information. +func (c *ConfigClient) UpdateCmekSettings(ctx context.Context, req *loggingpb.UpdateCmekSettingsRequest, opts ...gax.CallOption) (*loggingpb.CmekSettings, error) { + return c.internalClient.UpdateCmekSettings(ctx, req, opts...) +} + +// GetSettings gets the Log Router settings for the given resource. +// +// Note: Settings for the Log Router can be get for Google Cloud projects, +// folders, organizations and billing accounts. Currently it can only be +// configured for organizations. Once configured for an organization, it +// applies to all projects and folders in the Google Cloud organization. +// +// See Enabling CMEK for Log +// Router (at https://cloud.google.com/logging/docs/routing/managed-encryption) +// for more information. 
+func (c *ConfigClient) GetSettings(ctx context.Context, req *loggingpb.GetSettingsRequest, opts ...gax.CallOption) (*loggingpb.Settings, error) { + return c.internalClient.GetSettings(ctx, req, opts...) +} + +// UpdateSettings updates the Log Router settings for the given resource. +// +// Note: Settings for the Log Router can currently only be configured for +// Google Cloud organizations. Once configured, it applies to all projects and +// folders in the Google Cloud organization. +// +// UpdateSettings +// will fail if 1) kms_key_name is invalid, or 2) the associated service +// account does not have the required +// roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, or +// 3) access to the key is disabled. 4) location_id is not supported by +// Logging. 5) location_id violate OrgPolicy. +// +// See Enabling CMEK for Log +// Router (at https://cloud.google.com/logging/docs/routing/managed-encryption) +// for more information. +func (c *ConfigClient) UpdateSettings(ctx context.Context, req *loggingpb.UpdateSettingsRequest, opts ...gax.CallOption) (*loggingpb.Settings, error) { + return c.internalClient.UpdateSettings(ctx, req, opts...) +} + +// CopyLogEntries copies a set of log entries from a log bucket to a Cloud Storage bucket. +func (c *ConfigClient) CopyLogEntries(ctx context.Context, req *loggingpb.CopyLogEntriesRequest, opts ...gax.CallOption) (*CopyLogEntriesOperation, error) { + return c.internalClient.CopyLogEntries(ctx, req, opts...) +} + +// CopyLogEntriesOperation returns a new CopyLogEntriesOperation from a given name. +// The name must be that of a previously created CopyLogEntriesOperation, possibly from a different process. +func (c *ConfigClient) CopyLogEntriesOperation(name string) *CopyLogEntriesOperation { + return c.internalClient.CopyLogEntriesOperation(name) +} + +// configGRPCClient is a client for interacting with Cloud Logging API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type configGRPCClient struct { // Connection pool of gRPC connections to the service. connPool gtransport.ConnPool // flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE disableDeadlines bool + // Points back to the CallOptions field of the containing ConfigClient + CallOptions **ConfigCallOptions + // The gRPC API client. configClient loggingpb.ConfigServiceV2Client - // The call options for this service. - CallOptions *ConfigCallOptions + // LROClient is used internally to handle long-running operations. + // It is exposed so that its CallOptions can be modified if required. + // Users should not Close this client. + LROClient **lroauto.OperationsClient // The x-goog-* metadata to be sent with each request. xGoogMetadata metadata.MD } -// NewConfigClient creates a new config service v2 client. +// NewConfigClient creates a new config service v2 client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. // // Service for configuring sinks used to route log entries. 
func NewConfigClient(ctx context.Context, opts ...option.ClientOption) (*ConfigClient, error) { - clientOpts := defaultConfigClientOptions() - + clientOpts := defaultConfigGRPCClientOptions() if newConfigClientHook != nil { hookOpts, err := newConfigClientHook(ctx, clientHookParams{}) if err != nil { @@ -231,53 +536,70 @@ func NewConfigClient(ctx context.Context, opts ...option.ClientOption) (*ConfigC if err != nil { return nil, err } - c := &ConfigClient{ + client := ConfigClient{CallOptions: defaultConfigCallOptions()} + + c := &configGRPCClient{ connPool: connPool, disableDeadlines: disableDeadlines, - CallOptions: defaultConfigCallOptions(), - - configClient: loggingpb.NewConfigServiceV2Client(connPool), + configClient: loggingpb.NewConfigServiceV2Client(connPool), + CallOptions: &client.CallOptions, } c.setGoogleClientInfo() - return c, nil + client.internalClient = c + + client.LROClient, err = lroauto.NewOperationsClient(ctx, gtransport.WithConnPool(connPool)) + if err != nil { + // This error "should not happen", since we are just reusing old connection pool + // and never actually need to dial. + // If this does happen, we could leak connp. However, we cannot close conn: + // If the user invoked the constructor with option.WithGRPCConn, + // we would close a connection that's still in use. + // TODO: investigate error conditions. + return nil, err + } + c.LROClient = &client.LROClient + return &client, nil } // Connection returns a connection to the API service. // -// Deprecated. -func (c *ConfigClient) Connection() *grpc.ClientConn { +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *configGRPCClient) Connection() *grpc.ClientConn { return c.connPool.Conn() } -// Close closes the connection to the API service. The user should invoke this when -// the client is no longer required. -func (c *ConfigClient) Close() error { - return c.connPool.Close() -} - // setGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. -func (c *ConfigClient) setGoogleClientInfo(keyval ...string) { +func (c *configGRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", versionGo()}, keyval...) - kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) } -// ListBuckets lists buckets. -func (c *ConfigClient) ListBuckets(ctx context.Context, req *loggingpb.ListBucketsRequest, opts ...gax.CallOption) *LogBucketIterator { +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *configGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *configGRPCClient) ListBuckets(ctx context.Context, req *loggingpb.ListBucketsRequest, opts ...gax.CallOption) *LogBucketIterator { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListBuckets[0:len(c.CallOptions.ListBuckets):len(c.CallOptions.ListBuckets)], opts...) + opts = append((*c.CallOptions).ListBuckets[0:len((*c.CallOptions).ListBuckets):len((*c.CallOptions).ListBuckets)], opts...) 
it := &LogBucketIterator{} req = proto.Clone(req).(*loggingpb.ListBucketsRequest) it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogBucket, string, error) { - var resp *loggingpb.ListBucketsResponse - req.PageToken = pageToken + resp := &loggingpb.ListBucketsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 - } else { + } else if pageSize != 0 { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -300,17 +622,19 @@ func (c *ConfigClient) ListBuckets(ctx context.Context, req *loggingpb.ListBucke it.items = append(it.items, items...) return nextPageToken, nil } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) it.pageInfo.MaxSize = int(req.GetPageSize()) it.pageInfo.Token = req.GetPageToken() + return it } -// GetBucket gets a bucket. -func (c *ConfigClient) GetBucket(ctx context.Context, req *loggingpb.GetBucketRequest, opts ...gax.CallOption) (*loggingpb.LogBucket, error) { +func (c *configGRPCClient) GetBucket(ctx context.Context, req *loggingpb.GetBucketRequest, opts ...gax.CallOption) (*loggingpb.LogBucket, error) { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.GetBucket[0:len(c.CallOptions.GetBucket):len(c.CallOptions.GetBucket)], opts...) + opts = append((*c.CallOptions).GetBucket[0:len((*c.CallOptions).GetBucket):len((*c.CallOptions).GetBucket)], opts...) var resp *loggingpb.LogBucket err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -323,12 +647,11 @@ func (c *ConfigClient) GetBucket(ctx context.Context, req *loggingpb.GetBucketRe return resp, nil } -// CreateBucket creates a bucket that can be used to store log entries. Once a bucket has -// been created, the region cannot be changed. -func (c *ConfigClient) CreateBucket(ctx context.Context, req *loggingpb.CreateBucketRequest, opts ...gax.CallOption) (*loggingpb.LogBucket, error) { +func (c *configGRPCClient) CreateBucket(ctx context.Context, req *loggingpb.CreateBucketRequest, opts ...gax.CallOption) (*loggingpb.LogBucket, error) { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.CreateBucket[0:len(c.CallOptions.CreateBucket):len(c.CallOptions.CreateBucket)], opts...) + opts = append((*c.CallOptions).CreateBucket[0:len((*c.CallOptions).CreateBucket):len((*c.CallOptions).CreateBucket)], opts...) var resp *loggingpb.LogBucket err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -341,20 +664,11 @@ func (c *ConfigClient) CreateBucket(ctx context.Context, req *loggingpb.CreateBu return resp, nil } -// UpdateBucket updates a bucket. This method replaces the following fields in the -// existing bucket with values from the new bucket: retention_period -// -// If the retention period is decreased and the bucket is locked, -// FAILED_PRECONDITION will be returned. -// -// If the bucket has a LifecycleState of DELETE_REQUESTED, FAILED_PRECONDITION -// will be returned. -// -// A buckets region may not be modified after it is created. 
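A note on the regenerated pagination plumbing above and below (ListBuckets here, ListViews/ListSinks/ListExclusions in later hunks): the request is cloned, the page token and page size are now only propagated when non-empty/non-zero, and call options are read through the shared *CallOptions pointer. A minimal consumer of the returned iterator, sketched under the usual assumptions (a configClient and ctx already in scope, imports of google.golang.org/api/iterator and cloud.google.com/go/logging/apiv2/loggingpb, and a placeholder parent name), would look roughly like:

	it := configClient.ListBuckets(ctx, &loggingpb.ListBucketsRequest{
		Parent: "projects/my-project/locations/global", // placeholder resource name
	})
	for {
		bucket, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		_ = bucket // use the *loggingpb.LogBucket
	}

The Next/iterator.Done convention is the standard GAPIC iterator pattern and is unchanged by this diff; only the InternalFetch internals shown above are.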
-func (c *ConfigClient) UpdateBucket(ctx context.Context, req *loggingpb.UpdateBucketRequest, opts ...gax.CallOption) (*loggingpb.LogBucket, error) { +func (c *configGRPCClient) UpdateBucket(ctx context.Context, req *loggingpb.UpdateBucketRequest, opts ...gax.CallOption) (*loggingpb.LogBucket, error) { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.UpdateBucket[0:len(c.CallOptions.UpdateBucket):len(c.CallOptions.UpdateBucket)], opts...) + opts = append((*c.CallOptions).UpdateBucket[0:len((*c.CallOptions).UpdateBucket):len((*c.CallOptions).UpdateBucket)], opts...) var resp *loggingpb.LogBucket err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -367,14 +681,11 @@ func (c *ConfigClient) UpdateBucket(ctx context.Context, req *loggingpb.UpdateBu return resp, nil } -// DeleteBucket deletes a bucket. -// Moves the bucket to the DELETE_REQUESTED state. After 7 days, the -// bucket will be purged and all logs in the bucket will be permanently -// deleted. -func (c *ConfigClient) DeleteBucket(ctx context.Context, req *loggingpb.DeleteBucketRequest, opts ...gax.CallOption) error { +func (c *configGRPCClient) DeleteBucket(ctx context.Context, req *loggingpb.DeleteBucketRequest, opts ...gax.CallOption) error { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.DeleteBucket[0:len(c.CallOptions.DeleteBucket):len(c.CallOptions.DeleteBucket)], opts...) + opts = append((*c.CallOptions).DeleteBucket[0:len((*c.CallOptions).DeleteBucket):len((*c.CallOptions).DeleteBucket)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.configClient.DeleteBucket(ctx, req, settings.GRPC...) @@ -383,12 +694,11 @@ func (c *ConfigClient) DeleteBucket(ctx context.Context, req *loggingpb.DeleteBu return err } -// UndeleteBucket undeletes a bucket. A bucket that has been deleted may be undeleted within -// the grace period of 7 days. -func (c *ConfigClient) UndeleteBucket(ctx context.Context, req *loggingpb.UndeleteBucketRequest, opts ...gax.CallOption) error { +func (c *configGRPCClient) UndeleteBucket(ctx context.Context, req *loggingpb.UndeleteBucketRequest, opts ...gax.CallOption) error { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.UndeleteBucket[0:len(c.CallOptions.UndeleteBucket):len(c.CallOptions.UndeleteBucket)], opts...) + opts = append((*c.CallOptions).UndeleteBucket[0:len((*c.CallOptions).UndeleteBucket):len((*c.CallOptions).UndeleteBucket)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.configClient.UndeleteBucket(ctx, req, settings.GRPC...) @@ -397,19 +707,21 @@ func (c *ConfigClient) UndeleteBucket(ctx context.Context, req *loggingpb.Undele return err } -// ListViews lists views on a bucket. 
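As a concrete illustration of the x-goog-request-params routing header that the metadata.Pairs/url.QueryEscape lines above construct (the bucket name is made up for the example):

	name := "projects/my-project/locations/global/buckets/my-bucket" // example value only
	md := metadata.Pairs("x-goog-request-params",
		fmt.Sprintf("%s=%v", "name", url.QueryEscape(name)))
	// md.Get("x-goog-request-params")[0] is now
	// "name=projects%2Fmy-project%2Flocations%2Fglobal%2Fbuckets%2Fmy-bucket"

This header is then merged with the x-goog-api-client metadata by insertMetadata before the RPC is issued.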
-func (c *ConfigClient) ListViews(ctx context.Context, req *loggingpb.ListViewsRequest, opts ...gax.CallOption) *LogViewIterator { +func (c *configGRPCClient) ListViews(ctx context.Context, req *loggingpb.ListViewsRequest, opts ...gax.CallOption) *LogViewIterator { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListViews[0:len(c.CallOptions.ListViews):len(c.CallOptions.ListViews)], opts...) + opts = append((*c.CallOptions).ListViews[0:len((*c.CallOptions).ListViews):len((*c.CallOptions).ListViews)], opts...) it := &LogViewIterator{} req = proto.Clone(req).(*loggingpb.ListViewsRequest) it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogView, string, error) { - var resp *loggingpb.ListViewsResponse - req.PageToken = pageToken + resp := &loggingpb.ListViewsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 - } else { + } else if pageSize != 0 { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -432,17 +744,19 @@ func (c *ConfigClient) ListViews(ctx context.Context, req *loggingpb.ListViewsRe it.items = append(it.items, items...) return nextPageToken, nil } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) it.pageInfo.MaxSize = int(req.GetPageSize()) it.pageInfo.Token = req.GetPageToken() + return it } -// GetView gets a view. -func (c *ConfigClient) GetView(ctx context.Context, req *loggingpb.GetViewRequest, opts ...gax.CallOption) (*loggingpb.LogView, error) { +func (c *configGRPCClient) GetView(ctx context.Context, req *loggingpb.GetViewRequest, opts ...gax.CallOption) (*loggingpb.LogView, error) { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.GetView[0:len(c.CallOptions.GetView):len(c.CallOptions.GetView)], opts...) + opts = append((*c.CallOptions).GetView[0:len((*c.CallOptions).GetView):len((*c.CallOptions).GetView)], opts...) var resp *loggingpb.LogView err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -455,12 +769,11 @@ func (c *ConfigClient) GetView(ctx context.Context, req *loggingpb.GetViewReques return resp, nil } -// CreateView creates a view over logs in a bucket. A bucket may contain a maximum of -// 50 views. -func (c *ConfigClient) CreateView(ctx context.Context, req *loggingpb.CreateViewRequest, opts ...gax.CallOption) (*loggingpb.LogView, error) { +func (c *configGRPCClient) CreateView(ctx context.Context, req *loggingpb.CreateViewRequest, opts ...gax.CallOption) (*loggingpb.LogView, error) { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.CreateView[0:len(c.CallOptions.CreateView):len(c.CallOptions.CreateView)], opts...) + opts = append((*c.CallOptions).CreateView[0:len((*c.CallOptions).CreateView):len((*c.CallOptions).CreateView)], opts...) 
var resp *loggingpb.LogView err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -473,12 +786,11 @@ func (c *ConfigClient) CreateView(ctx context.Context, req *loggingpb.CreateView return resp, nil } -// UpdateView updates a view. This method replaces the following fields in the existing -// view with values from the new view: filter. -func (c *ConfigClient) UpdateView(ctx context.Context, req *loggingpb.UpdateViewRequest, opts ...gax.CallOption) (*loggingpb.LogView, error) { +func (c *configGRPCClient) UpdateView(ctx context.Context, req *loggingpb.UpdateViewRequest, opts ...gax.CallOption) (*loggingpb.LogView, error) { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.UpdateView[0:len(c.CallOptions.UpdateView):len(c.CallOptions.UpdateView)], opts...) + opts = append((*c.CallOptions).UpdateView[0:len((*c.CallOptions).UpdateView):len((*c.CallOptions).UpdateView)], opts...) var resp *loggingpb.LogView err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -491,11 +803,11 @@ func (c *ConfigClient) UpdateView(ctx context.Context, req *loggingpb.UpdateView return resp, nil } -// DeleteView deletes a view from a bucket. -func (c *ConfigClient) DeleteView(ctx context.Context, req *loggingpb.DeleteViewRequest, opts ...gax.CallOption) error { +func (c *configGRPCClient) DeleteView(ctx context.Context, req *loggingpb.DeleteViewRequest, opts ...gax.CallOption) error { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.DeleteView[0:len(c.CallOptions.DeleteView):len(c.CallOptions.DeleteView)], opts...) + opts = append((*c.CallOptions).DeleteView[0:len((*c.CallOptions).DeleteView):len((*c.CallOptions).DeleteView)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.configClient.DeleteView(ctx, req, settings.GRPC...) @@ -504,19 +816,21 @@ func (c *ConfigClient) DeleteView(ctx context.Context, req *loggingpb.DeleteView return err } -// ListSinks lists sinks. -func (c *ConfigClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRequest, opts ...gax.CallOption) *LogSinkIterator { +func (c *configGRPCClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRequest, opts ...gax.CallOption) *LogSinkIterator { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListSinks[0:len(c.CallOptions.ListSinks):len(c.CallOptions.ListSinks)], opts...) + opts = append((*c.CallOptions).ListSinks[0:len((*c.CallOptions).ListSinks):len((*c.CallOptions).ListSinks)], opts...) 
it := &LogSinkIterator{} req = proto.Clone(req).(*loggingpb.ListSinksRequest) it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogSink, string, error) { - var resp *loggingpb.ListSinksResponse - req.PageToken = pageToken + resp := &loggingpb.ListSinksResponse{} + if pageToken != "" { + req.PageToken = pageToken + } if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 - } else { + } else if pageSize != 0 { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -539,22 +853,24 @@ func (c *ConfigClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRe it.items = append(it.items, items...) return nextPageToken, nil } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) it.pageInfo.MaxSize = int(req.GetPageSize()) it.pageInfo.Token = req.GetPageToken() + return it } -// GetSink gets a sink. -func (c *ConfigClient) GetSink(ctx context.Context, req *loggingpb.GetSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) { +func (c *configGRPCClient) GetSink(ctx context.Context, req *loggingpb.GetSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) { if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) defer cancel() ctx = cctx } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "sink_name", url.QueryEscape(req.GetSinkName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.GetSink[0:len(c.CallOptions.GetSink):len(c.CallOptions.GetSink)], opts...) + opts = append((*c.CallOptions).GetSink[0:len((*c.CallOptions).GetSink):len((*c.CallOptions).GetSink)], opts...) var resp *loggingpb.LogSink err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -567,19 +883,16 @@ func (c *ConfigClient) GetSink(ctx context.Context, req *loggingpb.GetSinkReques return resp, nil } -// CreateSink creates a sink that exports specified log entries to a destination. The -// export of newly-ingested log entries begins immediately, unless the sink’s -// writer_identity is not permitted to write to the destination. A sink can -// export log entries only from the resource owning the sink. -func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) { +func (c *configGRPCClient) CreateSink(ctx context.Context, req *loggingpb.CreateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) { if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { cctx, cancel := context.WithTimeout(ctx, 120000*time.Millisecond) defer cancel() ctx = cctx } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.CreateSink[0:len(c.CallOptions.CreateSink):len(c.CallOptions.CreateSink)], opts...) + opts = append((*c.CallOptions).CreateSink[0:len((*c.CallOptions).CreateSink):len((*c.CallOptions).CreateSink)], opts...) var resp *loggingpb.LogSink err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -592,20 +905,16 @@ func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSink return resp, nil } -// UpdateSink updates a sink. This method replaces the following fields in the existing -// sink with values from the new sink: destination, and filter. 
-// -// The updated sink might also have a new writer_identity; see the -// unique_writer_identity field. -func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) { +func (c *configGRPCClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) { if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) defer cancel() ctx = cctx } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "sink_name", url.QueryEscape(req.GetSinkName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.UpdateSink[0:len(c.CallOptions.UpdateSink):len(c.CallOptions.UpdateSink)], opts...) + opts = append((*c.CallOptions).UpdateSink[0:len((*c.CallOptions).UpdateSink):len((*c.CallOptions).UpdateSink)], opts...) var resp *loggingpb.LogSink err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -618,17 +927,16 @@ func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSink return resp, nil } -// DeleteSink deletes a sink. If the sink has a unique writer_identity, then that -// service account is also deleted. -func (c *ConfigClient) DeleteSink(ctx context.Context, req *loggingpb.DeleteSinkRequest, opts ...gax.CallOption) error { +func (c *configGRPCClient) DeleteSink(ctx context.Context, req *loggingpb.DeleteSinkRequest, opts ...gax.CallOption) error { if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) defer cancel() ctx = cctx } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "sink_name", url.QueryEscape(req.GetSinkName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.DeleteSink[0:len(c.CallOptions.DeleteSink):len(c.CallOptions.DeleteSink)], opts...) + opts = append((*c.CallOptions).DeleteSink[0:len((*c.CallOptions).DeleteSink):len((*c.CallOptions).DeleteSink)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.configClient.DeleteSink(ctx, req, settings.GRPC...) @@ -637,19 +945,21 @@ func (c *ConfigClient) DeleteSink(ctx context.Context, req *loggingpb.DeleteSink return err } -// ListExclusions lists all the exclusions in a parent resource. -func (c *ConfigClient) ListExclusions(ctx context.Context, req *loggingpb.ListExclusionsRequest, opts ...gax.CallOption) *LogExclusionIterator { +func (c *configGRPCClient) ListExclusions(ctx context.Context, req *loggingpb.ListExclusionsRequest, opts ...gax.CallOption) *LogExclusionIterator { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListExclusions[0:len(c.CallOptions.ListExclusions):len(c.CallOptions.ListExclusions)], opts...) + opts = append((*c.CallOptions).ListExclusions[0:len((*c.CallOptions).ListExclusions):len((*c.CallOptions).ListExclusions)], opts...) 
it := &LogExclusionIterator{} req = proto.Clone(req).(*loggingpb.ListExclusionsRequest) it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogExclusion, string, error) { - var resp *loggingpb.ListExclusionsResponse - req.PageToken = pageToken + resp := &loggingpb.ListExclusionsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 - } else { + } else if pageSize != 0 { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -672,22 +982,24 @@ func (c *ConfigClient) ListExclusions(ctx context.Context, req *loggingpb.ListEx it.items = append(it.items, items...) return nextPageToken, nil } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) it.pageInfo.MaxSize = int(req.GetPageSize()) it.pageInfo.Token = req.GetPageToken() + return it } -// GetExclusion gets the description of an exclusion. -func (c *ConfigClient) GetExclusion(ctx context.Context, req *loggingpb.GetExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) { +func (c *configGRPCClient) GetExclusion(ctx context.Context, req *loggingpb.GetExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) { if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) defer cancel() ctx = cctx } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.GetExclusion[0:len(c.CallOptions.GetExclusion):len(c.CallOptions.GetExclusion)], opts...) + opts = append((*c.CallOptions).GetExclusion[0:len((*c.CallOptions).GetExclusion):len((*c.CallOptions).GetExclusion)], opts...) var resp *loggingpb.LogExclusion err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -700,18 +1012,16 @@ func (c *ConfigClient) GetExclusion(ctx context.Context, req *loggingpb.GetExclu return resp, nil } -// CreateExclusion creates a new exclusion in a specified parent resource. -// Only log entries belonging to that resource can be excluded. -// You can have up to 10 exclusions in a resource. -func (c *ConfigClient) CreateExclusion(ctx context.Context, req *loggingpb.CreateExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) { +func (c *configGRPCClient) CreateExclusion(ctx context.Context, req *loggingpb.CreateExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) { if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { cctx, cancel := context.WithTimeout(ctx, 120000*time.Millisecond) defer cancel() ctx = cctx } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.CreateExclusion[0:len(c.CallOptions.CreateExclusion):len(c.CallOptions.CreateExclusion)], opts...) + opts = append((*c.CallOptions).CreateExclusion[0:len((*c.CallOptions).CreateExclusion):len((*c.CallOptions).CreateExclusion)], opts...) var resp *loggingpb.LogExclusion err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -724,16 +1034,16 @@ func (c *ConfigClient) CreateExclusion(ctx context.Context, req *loggingpb.Creat return resp, nil } -// UpdateExclusion changes one or more properties of an existing exclusion. 
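The Get/Create methods above (GetSink, CreateSink, GetExclusion, CreateExclusion) only apply their client-side default timeouts (60s or 120s via context.WithTimeout) when the caller has not already set a deadline and has not opted out through GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE. A caller that wants a tighter bound can therefore simply supply its own deadline; a rough sketch, with a placeholder sink name:

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	// ctx already carries a deadline, so the 60s default above is skipped.
	sink, err := configClient.GetSink(ctx, &loggingpb.GetSinkRequest{
		SinkName: "projects/my-project/sinks/my-sink", // placeholder
	})
	if err != nil {
		// TODO: Handle error.
	}
	_ = sink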
-func (c *ConfigClient) UpdateExclusion(ctx context.Context, req *loggingpb.UpdateExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) { +func (c *configGRPCClient) UpdateExclusion(ctx context.Context, req *loggingpb.UpdateExclusionRequest, opts ...gax.CallOption) (*loggingpb.LogExclusion, error) { if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { cctx, cancel := context.WithTimeout(ctx, 120000*time.Millisecond) defer cancel() ctx = cctx } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.UpdateExclusion[0:len(c.CallOptions.UpdateExclusion):len(c.CallOptions.UpdateExclusion)], opts...) + opts = append((*c.CallOptions).UpdateExclusion[0:len((*c.CallOptions).UpdateExclusion):len((*c.CallOptions).UpdateExclusion)], opts...) var resp *loggingpb.LogExclusion err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -746,16 +1056,16 @@ func (c *ConfigClient) UpdateExclusion(ctx context.Context, req *loggingpb.Updat return resp, nil } -// DeleteExclusion deletes an exclusion. -func (c *ConfigClient) DeleteExclusion(ctx context.Context, req *loggingpb.DeleteExclusionRequest, opts ...gax.CallOption) error { +func (c *configGRPCClient) DeleteExclusion(ctx context.Context, req *loggingpb.DeleteExclusionRequest, opts ...gax.CallOption) error { if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) defer cancel() ctx = cctx } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.DeleteExclusion[0:len(c.CallOptions.DeleteExclusion):len(c.CallOptions.DeleteExclusion)], opts...) + opts = append((*c.CallOptions).DeleteExclusion[0:len((*c.CallOptions).DeleteExclusion):len((*c.CallOptions).DeleteExclusion)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.configClient.DeleteExclusion(ctx, req, settings.GRPC...) @@ -764,19 +1074,11 @@ func (c *ConfigClient) DeleteExclusion(ctx context.Context, req *loggingpb.Delet return err } -// GetCmekSettings gets the Logs Router CMEK settings for the given resource. -// -// Note: CMEK for the Logs Router can currently only be configured for GCP -// organizations. Once configured, it applies to all projects and folders in -// the GCP organization. -// -// See Enabling CMEK for Logs -// Router (at https://cloud.google.com/logging/docs/routing/managed-encryption) -// for more information. -func (c *ConfigClient) GetCmekSettings(ctx context.Context, req *loggingpb.GetCmekSettingsRequest, opts ...gax.CallOption) (*loggingpb.CmekSettings, error) { +func (c *configGRPCClient) GetCmekSettings(ctx context.Context, req *loggingpb.GetCmekSettingsRequest, opts ...gax.CallOption) (*loggingpb.CmekSettings, error) { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.GetCmekSettings[0:len(c.CallOptions.GetCmekSettings):len(c.CallOptions.GetCmekSettings)], opts...) + opts = append((*c.CallOptions).GetCmekSettings[0:len((*c.CallOptions).GetCmekSettings):len((*c.CallOptions).GetCmekSettings)], opts...) 
var resp *loggingpb.CmekSettings err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -789,25 +1091,11 @@ func (c *ConfigClient) GetCmekSettings(ctx context.Context, req *loggingpb.GetCm return resp, nil } -// UpdateCmekSettings updates the Logs Router CMEK settings for the given resource. -// -// Note: CMEK for the Logs Router can currently only be configured for GCP -// organizations. Once configured, it applies to all projects and folders in -// the GCP organization. -// -// UpdateCmekSettings -// will fail if 1) kms_key_name is invalid, or 2) the associated service -// account does not have the required -// roles/cloudkms.cryptoKeyEncrypterDecrypter role assigned for the key, or -// 3) access to the key is disabled. -// -// See Enabling CMEK for Logs -// Router (at https://cloud.google.com/logging/docs/routing/managed-encryption) -// for more information. -func (c *ConfigClient) UpdateCmekSettings(ctx context.Context, req *loggingpb.UpdateCmekSettingsRequest, opts ...gax.CallOption) (*loggingpb.CmekSettings, error) { +func (c *configGRPCClient) UpdateCmekSettings(ctx context.Context, req *loggingpb.UpdateCmekSettingsRequest, opts ...gax.CallOption) (*loggingpb.CmekSettings, error) { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.UpdateCmekSettings[0:len(c.CallOptions.UpdateCmekSettings):len(c.CallOptions.UpdateCmekSettings)], opts...) + opts = append((*c.CallOptions).UpdateCmekSettings[0:len((*c.CallOptions).UpdateCmekSettings):len((*c.CallOptions).UpdateCmekSettings)], opts...) var resp *loggingpb.CmekSettings err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -820,6 +1108,126 @@ func (c *ConfigClient) UpdateCmekSettings(ctx context.Context, req *loggingpb.Up return resp, nil } +func (c *configGRPCClient) GetSettings(ctx context.Context, req *loggingpb.GetSettingsRequest, opts ...gax.CallOption) (*loggingpb.Settings, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).GetSettings[0:len((*c.CallOptions).GetSettings):len((*c.CallOptions).GetSettings)], opts...) + var resp *loggingpb.Settings + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.configClient.GetSettings(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *configGRPCClient) UpdateSettings(ctx context.Context, req *loggingpb.UpdateSettingsRequest, opts ...gax.CallOption) (*loggingpb.Settings, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).UpdateSettings[0:len((*c.CallOptions).UpdateSettings):len((*c.CallOptions).UpdateSettings)], opts...) + var resp *loggingpb.Settings + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.configClient.UpdateSettings(ctx, req, settings.GRPC...) + return err + }, opts...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +func (c *configGRPCClient) CopyLogEntries(ctx context.Context, req *loggingpb.CopyLogEntriesRequest, opts ...gax.CallOption) (*CopyLogEntriesOperation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append((*c.CallOptions).CopyLogEntries[0:len((*c.CallOptions).CopyLogEntries):len((*c.CallOptions).CopyLogEntries)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.configClient.CopyLogEntries(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return &CopyLogEntriesOperation{ + lro: longrunning.InternalNewOperation(*c.LROClient, resp), + }, nil +} + +// CopyLogEntriesOperation manages a long-running operation from CopyLogEntries. +type CopyLogEntriesOperation struct { + lro *longrunning.Operation +} + +// CopyLogEntriesOperation returns a new CopyLogEntriesOperation from a given name. +// The name must be that of a previously created CopyLogEntriesOperation, possibly from a different process. +func (c *configGRPCClient) CopyLogEntriesOperation(name string) *CopyLogEntriesOperation { + return &CopyLogEntriesOperation{ + lro: longrunning.InternalNewOperation(*c.LROClient, &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning the response and any errors encountered. +// +// See documentation of Poll for error-handling information. +func (op *CopyLogEntriesOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*loggingpb.CopyLogEntriesResponse, error) { + var resp loggingpb.CopyLogEntriesResponse + if err := op.lro.WaitWithInterval(ctx, &resp, time.Minute, opts...); err != nil { + return nil, err + } + return &resp, nil +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true, and the response of the operation is returned. +// If Poll succeeds and the operation has not completed, the returned response and error are both nil. +func (op *CopyLogEntriesOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*loggingpb.CopyLogEntriesResponse, error) { + var resp loggingpb.CopyLogEntriesResponse + if err := op.lro.Poll(ctx, &resp, opts...); err != nil { + return nil, err + } + if !op.Done() { + return nil, nil + } + return &resp, nil +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. +func (op *CopyLogEntriesOperation) Metadata() (*loggingpb.CopyLogEntriesMetadata, error) { + var meta loggingpb.CopyLogEntriesMetadata + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. 
+func (op *CopyLogEntriesOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *CopyLogEntriesOperation) Name() string { + return op.lro.Name() +} + // LogBucketIterator manages a stream of *loggingpb.LogBucket. type LogBucketIterator struct { items []*loggingpb.LogBucket diff --git a/vendor/cloud.google.com/go/logging/apiv2/doc.go b/vendor/cloud.google.com/go/logging/apiv2/doc.go index f4dd7cc056bf2..033fe01b00232 100644 --- a/vendor/cloud.google.com/go/logging/apiv2/doc.go +++ b/vendor/cloud.google.com/go/logging/apiv2/doc.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -17,21 +17,65 @@ // Package logging is an auto-generated package for the // Cloud Logging API. // -// Writes log entries and manages your Cloud Logging configuration. The table -// entries below are presented in alphabetical order, not in order of common -// use. For explanations of the concepts found in the table entries, read the -// documentation at https://cloud.google.com/logging/docs. +// Writes log entries and manages your Cloud Logging configuration. // -// Use of Context +// # General documentation +// +// For information about setting deadlines, reusing contexts, and more +// please visit https://pkg.go.dev/cloud.google.com/go. +// +// # Example usage +// +// To get started with this package, create a client. +// +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := logging.NewClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() +// +// The client will use your default application credentials. Clients should be reused instead of created as needed. +// The methods of Client are safe for concurrent use by multiple goroutines. +// The returned client must be Closed when it is done being used. +// +// # Using the Client +// +// The following is an example of making an API call with the newly created client. +// +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := logging.NewClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() +// +// req := &loggingpb.DeleteLogRequest{ +// // TODO: Fill request struct fields. +// // See https://pkg.go.dev/cloud.google.com/go/logging/apiv2/loggingpb#DeleteLogRequest. +// } +// err = c.DeleteLog(ctx, req) +// if err != nil { +// // TODO: Handle error. +// } +// +// # Use of Context // // The ctx passed to NewClient is used for authentication requests and // for creating the underlying connection, but is not used for subsequent calls. 
// Individual methods on the client use the ctx given to them. // // To close the open connection, use the Close() method. -// -// For information about setting deadlines, reusing contexts, and more -// please visit pkg.go.dev/cloud.google.com/go. package logging // import "cloud.google.com/go/logging/apiv2" import ( @@ -51,7 +95,14 @@ import ( type clientHookParams struct{} type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error) -const versionClient = "20210518" +var versionClient string + +func getVersionClient() string { + if versionClient == "" { + return "UNKNOWN" + } + return versionClient +} func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { out, _ := metadata.FromOutgoingContext(ctx) diff --git a/vendor/cloud.google.com/go/logging/apiv2/gapic_metadata.json b/vendor/cloud.google.com/go/logging/apiv2/gapic_metadata.json index 63cb6a90b3d02..b125a4fa1dc2d 100644 --- a/vendor/cloud.google.com/go/logging/apiv2/gapic_metadata.json +++ b/vendor/cloud.google.com/go/logging/apiv2/gapic_metadata.json @@ -1,127 +1,142 @@ { - "schema": "1.0", - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods.", - "language": "go", - "protoPackage": "google.logging.v2", - "libraryPackage": "cloud.google.com/go/logging/apiv2", - "services": { - "ConfigServiceV2": { - "clients": { - "grpc": { - "libraryClient": "ConfigClient", - "rpcs": { - "CreateBucket": { - "methods": [ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods.", + "language": "go", + "protoPackage": "google.logging.v2", + "libraryPackage": "cloud.google.com/go/logging/apiv2", + "services": { + "ConfigServiceV2": { + "clients": { + "grpc": { + "libraryClient": "ConfigClient", + "rpcs": { + "CopyLogEntries": { + "methods": [ + "CopyLogEntries" + ] + }, + "CreateBucket": { + "methods": [ "CreateBucket" ] }, - "CreateExclusion": { - "methods": [ + "CreateExclusion": { + "methods": [ "CreateExclusion" ] }, - "CreateSink": { - "methods": [ + "CreateSink": { + "methods": [ "CreateSink" ] }, - "CreateView": { - "methods": [ + "CreateView": { + "methods": [ "CreateView" ] }, - "DeleteBucket": { - "methods": [ + "DeleteBucket": { + "methods": [ "DeleteBucket" ] }, - "DeleteExclusion": { - "methods": [ + "DeleteExclusion": { + "methods": [ "DeleteExclusion" ] }, - "DeleteSink": { - "methods": [ + "DeleteSink": { + "methods": [ "DeleteSink" ] }, - "DeleteView": { - "methods": [ + "DeleteView": { + "methods": [ "DeleteView" ] }, - "GetBucket": { - "methods": [ + "GetBucket": { + "methods": [ "GetBucket" ] }, - "GetCmekSettings": { - "methods": [ + "GetCmekSettings": { + "methods": [ "GetCmekSettings" ] }, - "GetExclusion": { - "methods": [ + "GetExclusion": { + "methods": [ "GetExclusion" ] }, - "GetSink": { - "methods": [ + "GetSettings": { + "methods": [ + "GetSettings" + ] + }, + "GetSink": { + "methods": [ "GetSink" ] }, - "GetView": { - "methods": [ + "GetView": { + "methods": [ "GetView" ] }, - "ListBuckets": { - "methods": [ + "ListBuckets": { + "methods": [ "ListBuckets" ] }, - "ListExclusions": { - "methods": [ + "ListExclusions": { + "methods": [ "ListExclusions" ] }, - "ListSinks": { - "methods": [ + "ListSinks": { + "methods": [ "ListSinks" ] }, - "ListViews": { - "methods": [ + "ListViews": { + "methods": [ "ListViews" ] }, - "UndeleteBucket": { - "methods": [ + "UndeleteBucket": { + "methods": [ "UndeleteBucket" ] }, - "UpdateBucket": { - "methods": [ + "UpdateBucket": { + "methods": [ 
"UpdateBucket" ] }, - "UpdateCmekSettings": { - "methods": [ + "UpdateCmekSettings": { + "methods": [ "UpdateCmekSettings" ] }, - "UpdateExclusion": { - "methods": [ + "UpdateExclusion": { + "methods": [ "UpdateExclusion" ] }, - "UpdateSink": { - "methods": [ + "UpdateSettings": { + "methods": [ + "UpdateSettings" + ] + }, + "UpdateSink": { + "methods": [ "UpdateSink" ] }, - "UpdateView": { - "methods": [ + "UpdateView": { + "methods": [ "UpdateView" ] } @@ -129,38 +144,38 @@ } } }, - "LoggingServiceV2": { - "clients": { - "grpc": { - "libraryClient": "Client", - "rpcs": { - "DeleteLog": { - "methods": [ + "LoggingServiceV2": { + "clients": { + "grpc": { + "libraryClient": "Client", + "rpcs": { + "DeleteLog": { + "methods": [ "DeleteLog" ] }, - "ListLogEntries": { - "methods": [ + "ListLogEntries": { + "methods": [ "ListLogEntries" ] }, - "ListLogs": { - "methods": [ + "ListLogs": { + "methods": [ "ListLogs" ] }, - "ListMonitoredResourceDescriptors": { - "methods": [ + "ListMonitoredResourceDescriptors": { + "methods": [ "ListMonitoredResourceDescriptors" ] }, - "TailLogEntries": { - "methods": [ + "TailLogEntries": { + "methods": [ "TailLogEntries" ] }, - "WriteLogEntries": { - "methods": [ + "WriteLogEntries": { + "methods": [ "WriteLogEntries" ] } @@ -168,33 +183,33 @@ } } }, - "MetricsServiceV2": { - "clients": { - "grpc": { - "libraryClient": "MetricsClient", - "rpcs": { - "CreateLogMetric": { - "methods": [ + "MetricsServiceV2": { + "clients": { + "grpc": { + "libraryClient": "MetricsClient", + "rpcs": { + "CreateLogMetric": { + "methods": [ "CreateLogMetric" ] }, - "DeleteLogMetric": { - "methods": [ + "DeleteLogMetric": { + "methods": [ "DeleteLogMetric" ] }, - "GetLogMetric": { - "methods": [ + "GetLogMetric": { + "methods": [ "GetLogMetric" ] }, - "ListLogMetrics": { - "methods": [ + "ListLogMetrics": { + "methods": [ "ListLogMetrics" ] }, - "UpdateLogMetric": { - "methods": [ + "UpdateLogMetric": { + "methods": [ "UpdateLogMetric" ] } diff --git a/vendor/cloud.google.com/go/logging/apiv2/logging_client.go b/vendor/cloud.google.com/go/logging/apiv2/logging_client.go index 2ee165ce1cd63..d05aa0b952508 100644 --- a/vendor/cloud.google.com/go/logging/apiv2/logging_client.go +++ b/vendor/cloud.google.com/go/logging/apiv2/logging_client.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -23,17 +23,17 @@ import ( "net/url" "time" - "github.com/golang/protobuf/proto" + loggingpb "cloud.google.com/go/logging/apiv2/loggingpb" gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/option/internaloption" gtransport "google.golang.org/api/transport/grpc" monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" - loggingpb "google.golang.org/genproto/googleapis/logging/v2" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" ) var newClientHook clientHook @@ -48,13 +48,13 @@ type CallOptions struct { TailLogEntries []gax.CallOption } -func defaultClientOptions() []option.ClientOption { +func defaultGRPCClientOptions() []option.ClientOption { return []option.ClientOption{ internaloption.WithDefaultEndpoint("logging.googleapis.com:443"), internaloption.WithDefaultMTLSEndpoint("logging.mtls.googleapis.com:443"), internaloption.WithDefaultAudience("https://logging.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), - option.WithGRPCDialOption(grpc.WithDisableServiceConfig()), + internaloption.EnableJwtWithScope(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -143,32 +143,124 @@ func defaultCallOptions() *CallOptions { } } +// internalClient is an interface that defines the methods available from Cloud Logging API. +type internalClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + DeleteLog(context.Context, *loggingpb.DeleteLogRequest, ...gax.CallOption) error + WriteLogEntries(context.Context, *loggingpb.WriteLogEntriesRequest, ...gax.CallOption) (*loggingpb.WriteLogEntriesResponse, error) + ListLogEntries(context.Context, *loggingpb.ListLogEntriesRequest, ...gax.CallOption) *LogEntryIterator + ListMonitoredResourceDescriptors(context.Context, *loggingpb.ListMonitoredResourceDescriptorsRequest, ...gax.CallOption) *MonitoredResourceDescriptorIterator + ListLogs(context.Context, *loggingpb.ListLogsRequest, ...gax.CallOption) *StringIterator + TailLogEntries(context.Context, ...gax.CallOption) (loggingpb.LoggingServiceV2_TailLogEntriesClient, error) +} + // Client is a client for interacting with Cloud Logging API. -// // Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// Service for ingesting and querying logs. type Client struct { + // The internal transport-dependent client. + internalClient internalClient + + // The call options for this service. + CallOptions *CallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. 
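One consequence of the restructuring below (the gRPC transport keeps a CallOptions **CallOptions pointer back to the wrapper) is that per-method call options can still be tuned on the exported Client after NewClient returns, and the internal client picks up the change on the next call. For example, adding a retry policy with gax; the codes and backoff values here are arbitrary illustrations, not taken from this diff:

	c, err := logging.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	defer c.Close()
	c.CallOptions.WriteLogEntries = append(c.CallOptions.WriteLogEntries,
		gax.WithRetry(func() gax.Retryer {
			return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
				Initial:    250 * time.Millisecond,
				Max:        30 * time.Second,
				Multiplier: 1.3,
			})
		}))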
+func (c *Client) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// DeleteLog deletes all the log entries in a log for the _Default Log Bucket. The log +// reappears if it receives new entries. Log entries written shortly before +// the delete operation might not be deleted. Entries received after the +// delete operation with a timestamp before the operation will be deleted. +func (c *Client) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteLog(ctx, req, opts...) +} + +// WriteLogEntries writes log entries to Logging. This API method is the +// only way to send log entries to Logging. This method +// is used, directly or indirectly, by the Logging agent +// (fluentd) and all logging libraries configured to use Logging. +// A single request may contain log entries for a maximum of 1000 +// different resources (projects, organizations, billing accounts or +// folders) +func (c *Client) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEntriesRequest, opts ...gax.CallOption) (*loggingpb.WriteLogEntriesResponse, error) { + return c.internalClient.WriteLogEntries(ctx, req, opts...) +} + +// ListLogEntries lists log entries. Use this method to retrieve log entries that originated +// from a project/folder/organization/billing account. For ways to export log +// entries, see Exporting +// Logs (at https://cloud.google.com/logging/docs/export). +func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntriesRequest, opts ...gax.CallOption) *LogEntryIterator { + return c.internalClient.ListLogEntries(ctx, req, opts...) +} + +// ListMonitoredResourceDescriptors lists the descriptors for monitored resource types used by Logging. +func (c *Client) ListMonitoredResourceDescriptors(ctx context.Context, req *loggingpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator { + return c.internalClient.ListMonitoredResourceDescriptors(ctx, req, opts...) +} + +// ListLogs lists the logs in projects, organizations, folders, or billing accounts. +// Only logs that have entries are listed. +func (c *Client) ListLogs(ctx context.Context, req *loggingpb.ListLogsRequest, opts ...gax.CallOption) *StringIterator { + return c.internalClient.ListLogs(ctx, req, opts...) +} + +// TailLogEntries streaming read of log entries as they are ingested. Until the stream is +// terminated, it will continue reading logs. +func (c *Client) TailLogEntries(ctx context.Context, opts ...gax.CallOption) (loggingpb.LoggingServiceV2_TailLogEntriesClient, error) { + return c.internalClient.TailLogEntries(ctx, opts...) +} + +// gRPCClient is a client for interacting with Cloud Logging API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type gRPCClient struct { // Connection pool of gRPC connections to the service. connPool gtransport.ConnPool // flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE disableDeadlines bool + // Points back to the CallOptions field of the containing Client + CallOptions **CallOptions + // The gRPC API client. client loggingpb.LoggingServiceV2Client - // The call options for this service. - CallOptions *CallOptions - // The x-goog-* metadata to be sent with each request. xGoogMetadata metadata.MD } -// NewClient creates a new logging service v2 client. 
+// NewClient creates a new logging service v2 client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. // // Service for ingesting and querying logs. func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { - clientOpts := defaultClientOptions() - + clientOpts := defaultGRPCClientOptions() if newClientHook != nil { hookOpts, err := newClientHook(ctx, clientHookParams{}) if err != nil { @@ -186,53 +278,54 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error if err != nil { return nil, err } - c := &Client{ + client := Client{CallOptions: defaultCallOptions()} + + c := &gRPCClient{ connPool: connPool, disableDeadlines: disableDeadlines, - CallOptions: defaultCallOptions(), - - client: loggingpb.NewLoggingServiceV2Client(connPool), + client: loggingpb.NewLoggingServiceV2Client(connPool), + CallOptions: &client.CallOptions, } c.setGoogleClientInfo() - return c, nil + client.internalClient = c + + return &client, nil } // Connection returns a connection to the API service. // -// Deprecated. -func (c *Client) Connection() *grpc.ClientConn { +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *gRPCClient) Connection() *grpc.ClientConn { return c.connPool.Conn() } -// Close closes the connection to the API service. The user should invoke this when -// the client is no longer required. -func (c *Client) Close() error { - return c.connPool.Close() -} - // setGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. -func (c *Client) setGoogleClientInfo(keyval ...string) { +func (c *gRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", versionGo()}, keyval...) - kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) } -// DeleteLog deletes all the log entries in a log. The log reappears if it receives new -// entries. Log entries written shortly before the delete operation might not -// be deleted. Entries received after the delete operation with a timestamp -// before the operation will be deleted. -func (c *Client) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest, opts ...gax.CallOption) error { +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *gRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *gRPCClient) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest, opts ...gax.CallOption) error { if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) defer cancel() ctx = cctx } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "log_name", url.QueryEscape(req.GetLogName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.DeleteLog[0:len(c.CallOptions.DeleteLog):len(c.CallOptions.DeleteLog)], opts...) + opts = append((*c.CallOptions).DeleteLog[0:len((*c.CallOptions).DeleteLog):len((*c.CallOptions).DeleteLog)], opts...) 
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.client.DeleteLog(ctx, req, settings.GRPC...) @@ -241,21 +334,14 @@ func (c *Client) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest, return err } -// WriteLogEntries writes log entries to Logging. This API method is the -// only way to send log entries to Logging. This method -// is used, directly or indirectly, by the Logging agent -// (fluentd) and all logging libraries configured to use Logging. -// A single request may contain log entries for a maximum of 1000 -// different resources (projects, organizations, billing accounts or -// folders) -func (c *Client) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEntriesRequest, opts ...gax.CallOption) (*loggingpb.WriteLogEntriesResponse, error) { +func (c *gRPCClient) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEntriesRequest, opts ...gax.CallOption) (*loggingpb.WriteLogEntriesResponse, error) { if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) defer cancel() ctx = cctx } ctx = insertMetadata(ctx, c.xGoogMetadata) - opts = append(c.CallOptions.WriteLogEntries[0:len(c.CallOptions.WriteLogEntries):len(c.CallOptions.WriteLogEntries)], opts...) + opts = append((*c.CallOptions).WriteLogEntries[0:len((*c.CallOptions).WriteLogEntries):len((*c.CallOptions).WriteLogEntries)], opts...) var resp *loggingpb.WriteLogEntriesResponse err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -268,21 +354,19 @@ func (c *Client) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEnt return resp, nil } -// ListLogEntries lists log entries. Use this method to retrieve log entries that originated -// from a project/folder/organization/billing account. For ways to export log -// entries, see Exporting -// Logs (at https://cloud.google.com/logging/docs/export). -func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntriesRequest, opts ...gax.CallOption) *LogEntryIterator { +func (c *gRPCClient) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntriesRequest, opts ...gax.CallOption) *LogEntryIterator { ctx = insertMetadata(ctx, c.xGoogMetadata) - opts = append(c.CallOptions.ListLogEntries[0:len(c.CallOptions.ListLogEntries):len(c.CallOptions.ListLogEntries)], opts...) + opts = append((*c.CallOptions).ListLogEntries[0:len((*c.CallOptions).ListLogEntries):len((*c.CallOptions).ListLogEntries)], opts...) it := &LogEntryIterator{} req = proto.Clone(req).(*loggingpb.ListLogEntriesRequest) it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogEntry, string, error) { - var resp *loggingpb.ListLogEntriesResponse - req.PageToken = pageToken + resp := &loggingpb.ListLogEntriesResponse{} + if pageToken != "" { + req.PageToken = pageToken + } if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 - } else { + } else if pageSize != 0 { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -305,24 +389,27 @@ func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntri it.items = append(it.items, items...) 
return nextPageToken, nil } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) it.pageInfo.MaxSize = int(req.GetPageSize()) it.pageInfo.Token = req.GetPageToken() + return it } -// ListMonitoredResourceDescriptors lists the descriptors for monitored resource types used by Logging. -func (c *Client) ListMonitoredResourceDescriptors(ctx context.Context, req *loggingpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator { +func (c *gRPCClient) ListMonitoredResourceDescriptors(ctx context.Context, req *loggingpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator { ctx = insertMetadata(ctx, c.xGoogMetadata) - opts = append(c.CallOptions.ListMonitoredResourceDescriptors[0:len(c.CallOptions.ListMonitoredResourceDescriptors):len(c.CallOptions.ListMonitoredResourceDescriptors)], opts...) + opts = append((*c.CallOptions).ListMonitoredResourceDescriptors[0:len((*c.CallOptions).ListMonitoredResourceDescriptors):len((*c.CallOptions).ListMonitoredResourceDescriptors)], opts...) it := &MonitoredResourceDescriptorIterator{} req = proto.Clone(req).(*loggingpb.ListMonitoredResourceDescriptorsRequest) it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) { - var resp *loggingpb.ListMonitoredResourceDescriptorsResponse - req.PageToken = pageToken + resp := &loggingpb.ListMonitoredResourceDescriptorsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 - } else { + } else if pageSize != 0 { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -345,26 +432,29 @@ func (c *Client) ListMonitoredResourceDescriptors(ctx context.Context, req *logg it.items = append(it.items, items...) return nextPageToken, nil } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) it.pageInfo.MaxSize = int(req.GetPageSize()) it.pageInfo.Token = req.GetPageToken() + return it } -// ListLogs lists the logs in projects, organizations, folders, or billing accounts. -// Only logs that have entries are listed. -func (c *Client) ListLogs(ctx context.Context, req *loggingpb.ListLogsRequest, opts ...gax.CallOption) *StringIterator { +func (c *gRPCClient) ListLogs(ctx context.Context, req *loggingpb.ListLogsRequest, opts ...gax.CallOption) *StringIterator { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListLogs[0:len(c.CallOptions.ListLogs):len(c.CallOptions.ListLogs)], opts...) + opts = append((*c.CallOptions).ListLogs[0:len((*c.CallOptions).ListLogs):len((*c.CallOptions).ListLogs)], opts...) 
it := &StringIterator{} req = proto.Clone(req).(*loggingpb.ListLogsRequest) it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) { - var resp *loggingpb.ListLogsResponse - req.PageToken = pageToken + resp := &loggingpb.ListLogsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 - } else { + } else if pageSize != 0 { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -387,18 +477,18 @@ func (c *Client) ListLogs(ctx context.Context, req *loggingpb.ListLogsRequest, o it.items = append(it.items, items...) return nextPageToken, nil } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) it.pageInfo.MaxSize = int(req.GetPageSize()) it.pageInfo.Token = req.GetPageToken() + return it } -// TailLogEntries streaming read of log entries as they are ingested. Until the stream is -// terminated, it will continue reading logs. -func (c *Client) TailLogEntries(ctx context.Context, opts ...gax.CallOption) (loggingpb.LoggingServiceV2_TailLogEntriesClient, error) { +func (c *gRPCClient) TailLogEntries(ctx context.Context, opts ...gax.CallOption) (loggingpb.LoggingServiceV2_TailLogEntriesClient, error) { ctx = insertMetadata(ctx, c.xGoogMetadata) - opts = append(c.CallOptions.TailLogEntries[0:len(c.CallOptions.TailLogEntries):len(c.CallOptions.TailLogEntries)], opts...) var resp loggingpb.LoggingServiceV2_TailLogEntriesClient + opts = append((*c.CallOptions).TailLogEntries[0:len((*c.CallOptions).TailLogEntries):len((*c.CallOptions).TailLogEntries)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error resp, err = c.client.TailLogEntries(ctx, settings.GRPC...) diff --git a/vendor/google.golang.org/genproto/googleapis/logging/v2/log_entry.pb.go b/vendor/cloud.google.com/go/logging/apiv2/loggingpb/log_entry.pb.go similarity index 99% rename from vendor/google.golang.org/genproto/googleapis/logging/v2/log_entry.pb.go rename to vendor/cloud.google.com/go/logging/apiv2/loggingpb/log_entry.pb.go index efb1217a618d9..5ee4fb37bd532 100644 --- a/vendor/google.golang.org/genproto/googleapis/logging/v2/log_entry.pb.go +++ b/vendor/cloud.google.com/go/logging/apiv2/loggingpb/log_entry.pb.go @@ -15,10 +15,10 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/logging/v2/log_entry.proto -package logging +package loggingpb import ( reflect "reflect" @@ -49,10 +49,10 @@ type LogEntry struct { // Required. The resource name of the log to which this log entry belongs: // - // "projects/[PROJECT_ID]/logs/[LOG_ID]" - // "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]" - // "folders/[FOLDER_ID]/logs/[LOG_ID]" + // "projects/[PROJECT_ID]/logs/[LOG_ID]" + // "organizations/[ORGANIZATION_ID]/logs/[LOG_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/logs/[LOG_ID]" + // "folders/[FOLDER_ID]/logs/[LOG_ID]" // // A project number may be used in place of PROJECT_ID. The project number is // translated to its corresponding PROJECT_ID internally and the `log_name` @@ -80,6 +80,7 @@ type LogEntry struct { // The log entry payload, which can be one of multiple types. 
// // Types that are assignable to Payload: + // // *LogEntry_ProtoPayload // *LogEntry_TextPayload // *LogEntry_JsonPayload @@ -328,8 +329,8 @@ type LogEntry_ProtoPayload struct { // The following protocol buffer types are supported; user-defined types // are not supported: // - // "type.googleapis.com/google.cloud.audit.AuditLog" - // "type.googleapis.com/google.appengine.logging.v1.RequestLog" + // "type.googleapis.com/google.cloud.audit.AuditLog" + // "type.googleapis.com/google.appengine.logging.v1.RequestLog" ProtoPayload *anypb.Any `protobuf:"bytes,2,opt,name=proto_payload,json=protoPayload,proto3,oneof"` } diff --git a/vendor/google.golang.org/genproto/googleapis/logging/v2/logging.pb.go b/vendor/cloud.google.com/go/logging/apiv2/loggingpb/logging.pb.go similarity index 98% rename from vendor/google.golang.org/genproto/googleapis/logging/v2/logging.pb.go rename to vendor/cloud.google.com/go/logging/apiv2/loggingpb/logging.pb.go index 0a8a1d5ee5243..cbc6ce968bf04 100644 --- a/vendor/google.golang.org/genproto/googleapis/logging/v2/logging.pb.go +++ b/vendor/cloud.google.com/go/logging/apiv2/loggingpb/logging.pb.go @@ -15,10 +15,10 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/logging/v2/logging.proto -package logging +package loggingpb import ( context "context" @@ -178,8 +178,8 @@ type WriteLogEntriesRequest struct { // // `[LOG_ID]` must be URL-encoded. For example: // - // "projects/my-project-id/logs/syslog" - // "organizations/123/logs/cloudaudit.googleapis.com%2Factivity" + // "projects/my-project-id/logs/syslog" + // "organizations/123/logs/cloudaudit.googleapis.com%2Factivity" // // The permission `logging.logEntries.create` is needed on each project, // organization, billing account, or folder that is receiving new log @@ -189,9 +189,9 @@ type WriteLogEntriesRequest struct { // Optional. A default monitored resource object that is assigned to all log // entries in `entries` that do not specify a value for `resource`. Example: // - // { "type": "gce_instance", - // "labels": { - // "zone": "us-central1-a", "instance_id": "00000000000000000000" }} + // { "type": "gce_instance", + // "labels": { + // "zone": "us-central1-a", "instance_id": "00000000000000000000" }} // // See [LogEntry][google.logging.v2.LogEntry]. Resource *monitoredres.MonitoredResource `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"` @@ -420,10 +420,10 @@ type ListLogEntriesRequest struct { // // May alternatively be one or more views: // - // * `projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` - // * `organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` - // * `billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` - // * `folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` + // - `projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` + // - `organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` + // - `billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` + // - `folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` // // Projects listed in the `project_ids` field are added to this list. 
ResourceNames []string `protobuf:"bytes,8,rep,name=resource_names,json=resourceNames,proto3" json:"resource_names,omitempty"` @@ -737,10 +737,10 @@ type ListLogsRequest struct { PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` // Optional. The resource name that owns the logs: // - // * `projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` - // * `organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` - // * `billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` - // * `folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` + // - `projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` + // - `organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` + // - `billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` + // - `folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` // // To support legacy queries, it could also be: // @@ -888,10 +888,10 @@ type TailLogEntriesRequest struct { // // May alternatively be one or more views: // - // * `projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` - // * `organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` - // * `billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` - // * `folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` + // - `projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` + // - `organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` + // - `billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` + // - `folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]` ResourceNames []string `protobuf:"bytes,1,rep,name=resource_names,json=resourceNames,proto3" json:"resource_names,omitempty"` // Optional. A filter that chooses which log entries to return. See [Advanced // Logs Filters](https://cloud.google.com/logging/docs/view/advanced_filters). diff --git a/vendor/google.golang.org/genproto/googleapis/logging/v2/logging_config.pb.go b/vendor/cloud.google.com/go/logging/apiv2/loggingpb/logging_config.pb.go similarity index 97% rename from vendor/google.golang.org/genproto/googleapis/logging/v2/logging_config.pb.go rename to vendor/cloud.google.com/go/logging/apiv2/loggingpb/logging_config.pb.go index 80e083d0a02bc..95ba5c66bbe71 100644 --- a/vendor/google.golang.org/genproto/googleapis/logging/v2/logging_config.pb.go +++ b/vendor/cloud.google.com/go/logging/apiv2/loggingpb/logging_config.pb.go @@ -15,10 +15,10 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/logging/v2/logging_config.proto -package logging +package loggingpb import ( context "context" @@ -235,7 +235,7 @@ type LogBucket struct { // // For example: // - // `projects/my-project/locations/global/buckets/my-bucket` + // `projects/my-project/locations/global/buckets/my-bucket` // // For a list of supported locations, see [Supported // Regions](https://cloud.google.com/logging/docs/region-support) @@ -385,7 +385,7 @@ type LogView struct { // // For example: // - // `projects/my-project/locations/global/buckets/my-bucket/views/my-view` + // `projects/my-project/locations/global/buckets/my-bucket/views/my-view` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Describes this view. Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` @@ -405,8 +405,8 @@ type LogView struct { // // For example: // - // SOURCE("projects/myproject") AND resource.type = "gce_instance" - // AND LOG_ID("stdout") + // SOURCE("projects/myproject") AND resource.type = "gce_instance" + // AND LOG_ID("stdout") Filter string `protobuf:"bytes,7,opt,name=filter,proto3" json:"filter,omitempty"` } @@ -496,9 +496,9 @@ type LogSink struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Required. The export destination: // - // "storage.googleapis.com/[GCS_BUCKET]" - // "bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]" - // "pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]" + // "storage.googleapis.com/[GCS_BUCKET]" + // "bigquery.googleapis.com/projects/[PROJECT_ID]/datasets/[DATASET]" + // "pubsub.googleapis.com/projects/[PROJECT_ID]/topics/[TOPIC_ID]" // // The sink's `writer_identity`, set when the sink is created, must have // permission to write to the destination or else the log entries are not @@ -513,7 +513,7 @@ type LogSink struct { // // For example: // - // `logName="projects/[PROJECT_ID]/logs/[LOG_ID]" AND severity>=ERROR` + // `logName="projects/[PROJECT_ID]/logs/[LOG_ID]" AND severity>=ERROR` Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"` // Optional. A description of this sink. // @@ -564,12 +564,13 @@ type LogSink struct { // To only export entries from certain child projects, filter on the project // part of the log name: // - // logName:("projects/test-project1/" OR "projects/test-project2/") AND - // resource.type=gce_instance + // logName:("projects/test-project1/" OR "projects/test-project2/") AND + // resource.type=gce_instance IncludeChildren bool `protobuf:"varint,9,opt,name=include_children,json=includeChildren,proto3" json:"include_children,omitempty"` // Destination dependent options. // // Types that are assignable to Options: + // // *LogSink_BigqueryOptions Options isLogSink_Options `protobuf_oneof:"options"` // Output only. The creation timestamp of the sink. @@ -796,10 +797,10 @@ type ListBucketsRequest struct { // Required. 
The parent resource whose buckets are to be listed: // - // "projects/[PROJECT_ID]/locations/[LOCATION_ID]" - // "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]" - // "folders/[FOLDER_ID]/locations/[LOCATION_ID]" + // "projects/[PROJECT_ID]/locations/[LOCATION_ID]" + // "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]" + // "folders/[FOLDER_ID]/locations/[LOCATION_ID]" // // Note: The locations portion of the resource must be specified, but // supplying the character `-` in place of [LOCATION_ID] will return all @@ -937,11 +938,11 @@ type CreateBucketRequest struct { // Required. The resource in which to create the log bucket: // - // "projects/[PROJECT_ID]/locations/[LOCATION_ID]" + // "projects/[PROJECT_ID]/locations/[LOCATION_ID]" // // For example: // - // `"projects/my-project/locations/global"` + // `"projects/my-project/locations/global"` Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // Required. A client-assigned identifier such as `"my-bucket"`. Identifiers are limited // to 100 characters and can include only letters, digits, underscores, @@ -1014,14 +1015,14 @@ type UpdateBucketRequest struct { // Required. The full resource name of the bucket to update. // - // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" - // "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" - // "folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" // // For example: // - // `"projects/my-project/locations/global/buckets/my-bucket"` + // `"projects/my-project/locations/global/buckets/my-bucket"` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Required. The updated bucket. Bucket *LogBucket `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"` @@ -1097,14 +1098,14 @@ type GetBucketRequest struct { // Required. The resource name of the bucket: // - // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" - // "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" - // "folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" // // For example: // - // `"projects/my-project/locations/global/buckets/my-bucket"` + // `"projects/my-project/locations/global/buckets/my-bucket"` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } @@ -1155,14 +1156,14 @@ type DeleteBucketRequest struct { // Required. The full resource name of the bucket to delete. 
// - // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" - // "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" - // "folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" // // For example: // - // `"projects/my-project/locations/global/buckets/my-bucket"` + // `"projects/my-project/locations/global/buckets/my-bucket"` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } @@ -1213,14 +1214,14 @@ type UndeleteBucketRequest struct { // Required. The full resource name of the bucket to undelete. // - // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" - // "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" - // "folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "organizations/[ORGANIZATION_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "folders/[FOLDER_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" // // For example: // - // `"projects/my-project/locations/global/buckets/my-bucket"` + // `"projects/my-project/locations/global/buckets/my-bucket"` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } @@ -1271,7 +1272,7 @@ type ListViewsRequest struct { // Required. The bucket whose views are to be listed: // - // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" + // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]" Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // Optional. If present, then retrieve the next batch of results from the preceding call // to this method. `pageToken` must be the value of `nextPageToken` from the @@ -1406,11 +1407,11 @@ type CreateViewRequest struct { // Required. The bucket in which to create the view // - // `"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]"` + // `"projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]"` // // For example: // - // `"projects/my-project/locations/global/buckets/my-bucket"` + // `"projects/my-project/locations/global/buckets/my-bucket"` Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // Required. The id to use for this view. ViewId string `protobuf:"bytes,2,opt,name=view_id,json=viewId,proto3" json:"view_id,omitempty"` @@ -1479,11 +1480,11 @@ type UpdateViewRequest struct { // Required. The full resource name of the view to update // - // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]" + // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]" // // For example: // - // `"projects/my-project/locations/global/buckets/my-bucket/views/my-view"` + // `"projects/my-project/locations/global/buckets/my-bucket/views/my-view"` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Required. 
The updated view. View *LogView `protobuf:"bytes,2,opt,name=view,proto3" json:"view,omitempty"` @@ -1559,11 +1560,11 @@ type GetViewRequest struct { // Required. The resource name of the policy: // - // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]" + // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]" // // For example: // - // `"projects/my-project/locations/global/buckets/my-bucket/views/my-view"` + // `"projects/my-project/locations/global/buckets/my-bucket/views/my-view"` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } @@ -1614,11 +1615,11 @@ type DeleteViewRequest struct { // Required. The full resource name of the view to delete: // - // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]" + // "projects/[PROJECT_ID]/locations/[LOCATION_ID]/buckets/[BUCKET_ID]/views/[VIEW_ID]" // // For example: // - // `"projects/my-project/locations/global/buckets/my-bucket/views/my-view"` + // `"projects/my-project/locations/global/buckets/my-bucket/views/my-view"` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } @@ -1669,10 +1670,10 @@ type ListSinksRequest struct { // Required. The parent resource whose sinks are to be listed: // - // "projects/[PROJECT_ID]" - // "organizations/[ORGANIZATION_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]" - // "folders/[FOLDER_ID]" + // "projects/[PROJECT_ID]" + // "organizations/[ORGANIZATION_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]" + // "folders/[FOLDER_ID]" Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // Optional. If present, then retrieve the next batch of results from the // preceding call to this method. `pageToken` must be the value of @@ -1806,14 +1807,14 @@ type GetSinkRequest struct { // Required. The resource name of the sink: // - // "projects/[PROJECT_ID]/sinks/[SINK_ID]" - // "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]" - // "folders/[FOLDER_ID]/sinks/[SINK_ID]" + // "projects/[PROJECT_ID]/sinks/[SINK_ID]" + // "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]" + // "folders/[FOLDER_ID]/sinks/[SINK_ID]" // // For example: // - // `"projects/my-project/sinks/my-sink"` + // `"projects/my-project/sinks/my-sink"` SinkName string `protobuf:"bytes,1,opt,name=sink_name,json=sinkName,proto3" json:"sink_name,omitempty"` } @@ -1864,15 +1865,15 @@ type CreateSinkRequest struct { // Required. The resource in which to create the sink: // - // "projects/[PROJECT_ID]" - // "organizations/[ORGANIZATION_ID]" - // "billingAccounts/[BILLING_ACCOUNT_ID]" - // "folders/[FOLDER_ID]" + // "projects/[PROJECT_ID]" + // "organizations/[ORGANIZATION_ID]" + // "billingAccounts/[BILLING_ACCOUNT_ID]" + // "folders/[FOLDER_ID]" // // For examples: // - // `"projects/my-project"` - // `"organizations/123456789"` + // `"projects/my-project"` + // `"organizations/123456789"` Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // Required. The new sink, whose `name` parameter is a sink identifier that // is not already in use. @@ -1953,14 +1954,14 @@ type UpdateSinkRequest struct { // Required. 
The full resource name of the sink to update, including the parent // resource and the sink identifier: // - "projects/[PROJECT_ID]/sinks/[SINK_ID]" - "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]" - "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]" - "folders/[FOLDER_ID]/sinks/[SINK_ID]" + "projects/[PROJECT_ID]/sinks/[SINK_ID]" + "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]" + "folders/[FOLDER_ID]/sinks/[SINK_ID]" // // For example: // - `"projects/my-project/sinks/my-sink"` + `"projects/my-project/sinks/my-sink"` SinkName string `protobuf:"bytes,1,opt,name=sink_name,json=sinkName,proto3" json:"sink_name,omitempty"` // Required. The updated sink, whose name is the same identifier that appears as part // of `sink_name`. @@ -1970,11 +1971,11 @@ type UpdateSinkRequest struct { // field on the value of `writer_identity` in the updated sink depends on both // the old and new values of this field: // - // + If the old and new values of this field are both false or both true, + // - If the old and new values of this field are both false or both true, // then there is no change to the sink's `writer_identity`. - // + If the old value is false and the new value is true, then + // - If the old value is false and the new value is true, then // `writer_identity` is changed to a unique service account. - // + It is an error if the old value is true and the new value is + // - It is an error if the old value is true and the new value is // set to false or defaulted to false. UniqueWriterIdentity bool `protobuf:"varint,3,opt,name=unique_writer_identity,json=uniqueWriterIdentity,proto3" json:"unique_writer_identity,omitempty"` // Optional. Field mask that specifies the fields in `sink` that need @@ -1984,7 +1985,7 @@ type UpdateSinkRequest struct { // An empty `updateMask` is temporarily treated as using the following mask // for backwards compatibility purposes: // - // `destination,filter,includeChildren` + // `destination,filter,includeChildren` // // At some point in the future, behavior will be removed and specifying an // empty `updateMask` will be an error. @@ -2065,14 +2066,14 @@ type DeleteSinkRequest struct { // Required. The full resource name of the sink to delete, including the parent // resource and the sink identifier: // - "projects/[PROJECT_ID]/sinks/[SINK_ID]" - "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]" - "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]" - "folders/[FOLDER_ID]/sinks/[SINK_ID]" + "projects/[PROJECT_ID]/sinks/[SINK_ID]" + "organizations/[ORGANIZATION_ID]/sinks/[SINK_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]/sinks/[SINK_ID]" + "folders/[FOLDER_ID]/sinks/[SINK_ID]" // // For example: // - `"projects/my-project/sinks/my-sink"` + `"projects/my-project/sinks/my-sink"` SinkName string `protobuf:"bytes,1,opt,name=sink_name,json=sinkName,proto3" json:"sink_name,omitempty"` } @@ -2141,7 +2142,7 @@ type LogExclusion struct { // For example, the following query matches 99% of low-severity log entries // from Google Cloud Storage buckets: // - // `resource.type=gcs_bucket severity<ERROR sample(insertId, 0.99)` + // `resource.type=gcs_bucket severity<ERROR sample(insertId, 0.99)` // // The maximum length of the filter is 20000 characters. Filter string `protobuf:"bytes,3,opt,name=filter,proto3" json:"filter,omitempty"` @@ -159,14 +159,14 @@ type LogMetric struct { // logs-based metric to extract the values to record from a log entry.
// Two functions are supported for value extraction: `EXTRACT(field)` or // `REGEXP_EXTRACT(field, regex)`. The argument are: - // 1. field: The name of the log entry field from which the value is to be - // extracted. - // 2. regex: A regular expression using the Google RE2 syntax - // (https://github.com/google/re2/wiki/Syntax) with a single capture - // group to extract data from the specified log entry field. The value - // of the field is converted to a string before applying the regex. - // It is an error to specify a regex that does not include exactly one - // capture group. + // 1. field: The name of the log entry field from which the value is to be + // extracted. + // 2. regex: A regular expression using the Google RE2 syntax + // (https://github.com/google/re2/wiki/Syntax) with a single capture + // group to extract data from the specified log entry field. The value + // of the field is converted to a string before applying the regex. + // It is an error to specify a regex that does not include exactly one + // capture group. // // The result of the extraction must be convertible to a double type, as the // distribution always records double values. If either the extraction or @@ -327,7 +327,7 @@ type ListLogMetricsRequest struct { // Required. The name of the project containing the metrics: // - // "projects/[PROJECT_ID]" + // "projects/[PROJECT_ID]" Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // Optional. If present, then retrieve the next batch of results from the // preceding call to this method. `pageToken` must be the value of @@ -461,7 +461,7 @@ type GetLogMetricRequest struct { // Required. The resource name of the desired metric: // - // "projects/[PROJECT_ID]/metrics/[METRIC_ID]" + // "projects/[PROJECT_ID]/metrics/[METRIC_ID]" MetricName string `protobuf:"bytes,1,opt,name=metric_name,json=metricName,proto3" json:"metric_name,omitempty"` } @@ -512,7 +512,7 @@ type CreateLogMetricRequest struct { // Required. The resource name of the project in which to create the metric: // - // "projects/[PROJECT_ID]" + // "projects/[PROJECT_ID]" // // The new metric must be provided in the request. Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` @@ -575,7 +575,7 @@ type UpdateLogMetricRequest struct { // Required. The resource name of the metric to update: // - // "projects/[PROJECT_ID]/metrics/[METRIC_ID]" + // "projects/[PROJECT_ID]/metrics/[METRIC_ID]" // // The updated metric must be provided in the request and it's // `name` field must be the same as `[METRIC_ID]` If the metric @@ -639,7 +639,7 @@ type DeleteLogMetricRequest struct { // Required. The resource name of the metric to delete: // - // "projects/[PROJECT_ID]/metrics/[METRIC_ID]" + // "projects/[PROJECT_ID]/metrics/[METRIC_ID]" MetricName string `protobuf:"bytes,1,opt,name=metric_name,json=metricName,proto3" json:"metric_name,omitempty"` } diff --git a/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go b/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go index 051282083db75..79fcde1f9d666 100644 --- a/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go +++ b/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go @@ -1,4 +1,4 @@ -// Copyright 2021 Google LLC +// Copyright 2023 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
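For orientation, the refactored metrics client below is still driven the same way from application code; the following is a minimal usage sketch, not part of the vendored diff, assuming the renamed loggingpb import path plus standard context/fmt imports:

    // Sketch: list logs-based metrics through the new MetricsClient wrapper.
    // Assumed imports:
    //   logging "cloud.google.com/go/logging/apiv2"
    //   loggingpb "cloud.google.com/go/logging/apiv2/loggingpb"
    //   "google.golang.org/api/iterator"
    func listMetrics(ctx context.Context, projectID string) error {
    	c, err := logging.NewMetricsClient(ctx) // returns the wrapper backed by metricsGRPCClient
    	if err != nil {
    		return err
    	}
    	defer c.Close() // Close is forwarded to the internal gRPC transport client

    	it := c.ListLogMetrics(ctx, &loggingpb.ListLogMetricsRequest{Parent: "projects/" + projectID})
    	for {
    		m, err := it.Next()
    		if err == iterator.Done {
    			return nil
    		}
    		if err != nil {
    			return err
    		}
    		fmt.Printf("%s: %s\n", m.GetName(), m.GetFilter())
    	}
    }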
@@ -23,16 +23,16 @@ import ( "net/url" "time" - "github.com/golang/protobuf/proto" + loggingpb "cloud.google.com/go/logging/apiv2/loggingpb" gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/option/internaloption" gtransport "google.golang.org/api/transport/grpc" - loggingpb "google.golang.org/genproto/googleapis/logging/v2" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/proto" ) var newMetricsClientHook clientHook @@ -46,13 +46,13 @@ type MetricsCallOptions struct { DeleteLogMetric []gax.CallOption } -func defaultMetricsClientOptions() []option.ClientOption { +func defaultMetricsGRPCClientOptions() []option.ClientOption { return []option.ClientOption{ internaloption.WithDefaultEndpoint("logging.googleapis.com:443"), internaloption.WithDefaultMTLSEndpoint("logging.mtls.googleapis.com:443"), internaloption.WithDefaultAudience("https://logging.googleapis.com/"), internaloption.WithDefaultScopes(DefaultAuthScopes()...), - option.WithGRPCDialOption(grpc.WithDisableServiceConfig()), + internaloption.EnableJwtWithScope(), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( grpc.MaxCallRecvMsgSize(math.MaxInt32))), } @@ -116,32 +116,104 @@ func defaultMetricsCallOptions() *MetricsCallOptions { } } +// internalMetricsClient is an interface that defines the methods available from Cloud Logging API. +type internalMetricsClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + ListLogMetrics(context.Context, *loggingpb.ListLogMetricsRequest, ...gax.CallOption) *LogMetricIterator + GetLogMetric(context.Context, *loggingpb.GetLogMetricRequest, ...gax.CallOption) (*loggingpb.LogMetric, error) + CreateLogMetric(context.Context, *loggingpb.CreateLogMetricRequest, ...gax.CallOption) (*loggingpb.LogMetric, error) + UpdateLogMetric(context.Context, *loggingpb.UpdateLogMetricRequest, ...gax.CallOption) (*loggingpb.LogMetric, error) + DeleteLogMetric(context.Context, *loggingpb.DeleteLogMetricRequest, ...gax.CallOption) error +} + // MetricsClient is a client for interacting with Cloud Logging API. -// // Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// Service for configuring logs-based metrics. type MetricsClient struct { + // The internal transport-dependent client. + internalClient internalMetricsClient + + // The call options for this service. + CallOptions *MetricsCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *MetricsClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *MetricsClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *MetricsClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// ListLogMetrics lists logs-based metrics. 
+func (c *MetricsClient) ListLogMetrics(ctx context.Context, req *loggingpb.ListLogMetricsRequest, opts ...gax.CallOption) *LogMetricIterator { + return c.internalClient.ListLogMetrics(ctx, req, opts...) +} + +// GetLogMetric gets a logs-based metric. +func (c *MetricsClient) GetLogMetric(ctx context.Context, req *loggingpb.GetLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) { + return c.internalClient.GetLogMetric(ctx, req, opts...) +} + +// CreateLogMetric creates a logs-based metric. +func (c *MetricsClient) CreateLogMetric(ctx context.Context, req *loggingpb.CreateLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) { + return c.internalClient.CreateLogMetric(ctx, req, opts...) +} + +// UpdateLogMetric creates or updates a logs-based metric. +func (c *MetricsClient) UpdateLogMetric(ctx context.Context, req *loggingpb.UpdateLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) { + return c.internalClient.UpdateLogMetric(ctx, req, opts...) +} + +// DeleteLogMetric deletes a logs-based metric. +func (c *MetricsClient) DeleteLogMetric(ctx context.Context, req *loggingpb.DeleteLogMetricRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteLogMetric(ctx, req, opts...) +} + +// metricsGRPCClient is a client for interacting with Cloud Logging API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type metricsGRPCClient struct { // Connection pool of gRPC connections to the service. connPool gtransport.ConnPool // flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE disableDeadlines bool + // Points back to the CallOptions field of the containing MetricsClient + CallOptions **MetricsCallOptions + // The gRPC API client. metricsClient loggingpb.MetricsServiceV2Client - // The call options for this service. - CallOptions *MetricsCallOptions - // The x-goog-* metadata to be sent with each request. xGoogMetadata metadata.MD } -// NewMetricsClient creates a new metrics service v2 client. +// NewMetricsClient creates a new metrics service v2 client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. // // Service for configuring logs-based metrics. func NewMetricsClient(ctx context.Context, opts ...option.ClientOption) (*MetricsClient, error) { - clientOpts := defaultMetricsClientOptions() - + clientOpts := defaultMetricsGRPCClientOptions() if newMetricsClientHook != nil { hookOpts, err := newMetricsClientHook(ctx, clientHookParams{}) if err != nil { @@ -159,53 +231,59 @@ func NewMetricsClient(ctx context.Context, opts ...option.ClientOption) (*Metric if err != nil { return nil, err } - c := &MetricsClient{ + client := MetricsClient{CallOptions: defaultMetricsCallOptions()} + + c := &metricsGRPCClient{ connPool: connPool, disableDeadlines: disableDeadlines, - CallOptions: defaultMetricsCallOptions(), - - metricsClient: loggingpb.NewMetricsServiceV2Client(connPool), + metricsClient: loggingpb.NewMetricsServiceV2Client(connPool), + CallOptions: &client.CallOptions, } c.setGoogleClientInfo() - return c, nil + client.internalClient = c + + return &client, nil } // Connection returns a connection to the API service. // -// Deprecated. -func (c *MetricsClient) Connection() *grpc.ClientConn { +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. 
+func (c *metricsGRPCClient) Connection() *grpc.ClientConn { return c.connPool.Conn() } -// Close closes the connection to the API service. The user should invoke this when -// the client is no longer required. -func (c *MetricsClient) Close() error { - return c.connPool.Close() -} - // setGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. -func (c *MetricsClient) setGoogleClientInfo(keyval ...string) { +func (c *metricsGRPCClient) setGoogleClientInfo(keyval ...string) { kv := append([]string{"gl-go", versionGo()}, keyval...) - kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) } -// ListLogMetrics lists logs-based metrics. -func (c *MetricsClient) ListLogMetrics(ctx context.Context, req *loggingpb.ListLogMetricsRequest, opts ...gax.CallOption) *LogMetricIterator { +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *metricsGRPCClient) Close() error { + return c.connPool.Close() +} + +func (c *metricsGRPCClient) ListLogMetrics(ctx context.Context, req *loggingpb.ListLogMetricsRequest, opts ...gax.CallOption) *LogMetricIterator { md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.ListLogMetrics[0:len(c.CallOptions.ListLogMetrics):len(c.CallOptions.ListLogMetrics)], opts...) + opts = append((*c.CallOptions).ListLogMetrics[0:len((*c.CallOptions).ListLogMetrics):len((*c.CallOptions).ListLogMetrics)], opts...) it := &LogMetricIterator{} req = proto.Clone(req).(*loggingpb.ListLogMetricsRequest) it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogMetric, string, error) { - var resp *loggingpb.ListLogMetricsResponse - req.PageToken = pageToken + resp := &loggingpb.ListLogMetricsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } if pageSize > math.MaxInt32 { req.PageSize = math.MaxInt32 - } else { + } else if pageSize != 0 { req.PageSize = int32(pageSize) } err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { @@ -228,22 +306,24 @@ func (c *MetricsClient) ListLogMetrics(ctx context.Context, req *loggingpb.ListL it.items = append(it.items, items...) return nextPageToken, nil } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) it.pageInfo.MaxSize = int(req.GetPageSize()) it.pageInfo.Token = req.GetPageToken() + return it } -// GetLogMetric gets a logs-based metric. 
-func (c *MetricsClient) GetLogMetric(ctx context.Context, req *loggingpb.GetLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) { +func (c *metricsGRPCClient) GetLogMetric(ctx context.Context, req *loggingpb.GetLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) { if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) defer cancel() ctx = cctx } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "metric_name", url.QueryEscape(req.GetMetricName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.GetLogMetric[0:len(c.CallOptions.GetLogMetric):len(c.CallOptions.GetLogMetric)], opts...) + opts = append((*c.CallOptions).GetLogMetric[0:len((*c.CallOptions).GetLogMetric):len((*c.CallOptions).GetLogMetric)], opts...) var resp *loggingpb.LogMetric err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -256,16 +336,16 @@ func (c *MetricsClient) GetLogMetric(ctx context.Context, req *loggingpb.GetLogM return resp, nil } -// CreateLogMetric creates a logs-based metric. -func (c *MetricsClient) CreateLogMetric(ctx context.Context, req *loggingpb.CreateLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) { +func (c *metricsGRPCClient) CreateLogMetric(ctx context.Context, req *loggingpb.CreateLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) { if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) defer cancel() ctx = cctx } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.CreateLogMetric[0:len(c.CallOptions.CreateLogMetric):len(c.CallOptions.CreateLogMetric)], opts...) + opts = append((*c.CallOptions).CreateLogMetric[0:len((*c.CallOptions).CreateLogMetric):len((*c.CallOptions).CreateLogMetric)], opts...) var resp *loggingpb.LogMetric err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -278,16 +358,16 @@ func (c *MetricsClient) CreateLogMetric(ctx context.Context, req *loggingpb.Crea return resp, nil } -// UpdateLogMetric creates or updates a logs-based metric. -func (c *MetricsClient) UpdateLogMetric(ctx context.Context, req *loggingpb.UpdateLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) { +func (c *metricsGRPCClient) UpdateLogMetric(ctx context.Context, req *loggingpb.UpdateLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) { if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) defer cancel() ctx = cctx } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "metric_name", url.QueryEscape(req.GetMetricName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.UpdateLogMetric[0:len(c.CallOptions.UpdateLogMetric):len(c.CallOptions.UpdateLogMetric)], opts...) + opts = append((*c.CallOptions).UpdateLogMetric[0:len((*c.CallOptions).UpdateLogMetric):len((*c.CallOptions).UpdateLogMetric)], opts...) 
var resp *loggingpb.LogMetric err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error @@ -300,16 +380,16 @@ func (c *MetricsClient) UpdateLogMetric(ctx context.Context, req *loggingpb.Upda return resp, nil } -// DeleteLogMetric deletes a logs-based metric. -func (c *MetricsClient) DeleteLogMetric(ctx context.Context, req *loggingpb.DeleteLogMetricRequest, opts ...gax.CallOption) error { +func (c *metricsGRPCClient) DeleteLogMetric(ctx context.Context, req *loggingpb.DeleteLogMetricRequest, opts ...gax.CallOption) error { if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { cctx, cancel := context.WithTimeout(ctx, 60000*time.Millisecond) defer cancel() ctx = cctx } md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "metric_name", url.QueryEscape(req.GetMetricName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) - opts = append(c.CallOptions.DeleteLogMetric[0:len(c.CallOptions.DeleteLogMetric):len(c.CallOptions.DeleteLogMetric)], opts...) + opts = append((*c.CallOptions).DeleteLogMetric[0:len((*c.CallOptions).DeleteLogMetric):len((*c.CallOptions).DeleteLogMetric)], opts...) err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error _, err = c.metricsClient.DeleteLogMetric(ctx, req, settings.GRPC...) diff --git a/vendor/cloud.google.com/go/logging/apiv2/path_funcs.go b/vendor/cloud.google.com/go/logging/apiv2/path_funcs.go index 37bbe9d4f4437..722a2004752d1 100644 --- a/vendor/cloud.google.com/go/logging/apiv2/path_funcs.go +++ b/vendor/cloud.google.com/go/logging/apiv2/path_funcs.go @@ -17,7 +17,9 @@ package logging // ConfigProjectPath returns the path for the project resource. // // Deprecated: Use -// fmt.Sprintf("projects/%s", project) +// +// fmt.Sprintf("projects/%s", project) +// // instead. func ConfigProjectPath(project string) string { return "" + @@ -29,7 +31,9 @@ func ConfigProjectPath(project string) string { // ConfigSinkPath returns the path for the sink resource. // // Deprecated: Use -// fmt.Sprintf("projects/%s/sinks/%s", project, sink) +// +// fmt.Sprintf("projects/%s/sinks/%s", project, sink) +// // instead. func ConfigSinkPath(project, sink string) string { return "" + @@ -43,7 +47,9 @@ func ConfigSinkPath(project, sink string) string { // ConfigExclusionPath returns the path for the exclusion resource. // // Deprecated: Use -// fmt.Sprintf("projects/%s/exclusions/%s", project, exclusion) +// +// fmt.Sprintf("projects/%s/exclusions/%s", project, exclusion) +// // instead. func ConfigExclusionPath(project, exclusion string) string { return "" + @@ -57,7 +63,9 @@ func ConfigExclusionPath(project, exclusion string) string { // ProjectPath returns the path for the project resource. // // Deprecated: Use -// fmt.Sprintf("projects/%s", project) +// +// fmt.Sprintf("projects/%s", project) +// // instead. func ProjectPath(project string) string { return "" + @@ -69,7 +77,9 @@ func ProjectPath(project string) string { // LogPath returns the path for the log resource. // // Deprecated: Use -// fmt.Sprintf("projects/%s/logs/%s", project, log) +// +// fmt.Sprintf("projects/%s/logs/%s", project, log) +// // instead. func LogPath(project, log string) string { return "" + @@ -83,7 +93,9 @@ func LogPath(project, log string) string { // MetricsProjectPath returns the path for the project resource. // // Deprecated: Use -// fmt.Sprintf("projects/%s", project) +// +// fmt.Sprintf("projects/%s", project) +// // instead. 
func MetricsProjectPath(project string) string { return "" + @@ -95,7 +107,9 @@ func MetricsProjectPath(project string) string { // MetricsMetricPath returns the path for the metric resource. // // Deprecated: Use -// fmt.Sprintf("projects/%s/metrics/%s", project, metric) +// +// fmt.Sprintf("projects/%s/metrics/%s", project, metric) +// // instead. func MetricsMetricPath(project, metric string) string { return "" + diff --git a/vendor/cloud.google.com/go/logging/apiv2/version.go b/vendor/cloud.google.com/go/logging/apiv2/version.go new file mode 100644 index 0000000000000..da2bceec5c083 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/version.go @@ -0,0 +1,23 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by gapicgen. DO NOT EDIT. + +package logging + +import "cloud.google.com/go/logging/internal" + +func init() { + versionClient = internal.Version +} diff --git a/vendor/cloud.google.com/go/logging/doc.go b/vendor/cloud.google.com/go/logging/doc.go index 3aa9b12b0628c..343d99e0c64e4 100644 --- a/vendor/cloud.google.com/go/logging/doc.go +++ b/vendor/cloud.google.com/go/logging/doc.go @@ -20,8 +20,7 @@ see package cloud.google.com/go/logging/logadmin. This client uses Logging API v2. See https://cloud.google.com/logging/docs/api/v2/ for an introduction to the API. - -Creating a Client +# Creating a Client Use a Client to interact with the Cloud Logging API. @@ -32,8 +31,7 @@ Use a Client to interact with the Cloud Logging API. // TODO: Handle error. } - -Basic Usage +# Basic Usage For most use cases, you'll want to add log entries to a buffer to be periodically flushed (automatically and asynchronously) to the Cloud Logging service. @@ -44,8 +42,7 @@ flushed (automatically and asynchronously) to the Cloud Logging service. // Add entry to log buffer lg.Log(logging.Entry{Payload: "something happened!"}) - -Closing your Client +# Closing your Client You should call Client.Close before your program exits to flush any buffered log entries to the Cloud Logging service. @@ -55,8 +52,7 @@ You should call Client.Close before your program exits to flush any buffered log // TODO: Handle error. } - -Synchronous Logging +# Synchronous Logging For critical errors, you may want to send your log entries immediately. LogSync is slow and will block until the log entry has been sent, so it is @@ -67,8 +63,20 @@ not recommended for normal use. // TODO: Handle error. } +# Redirecting log ingestion + +For cases when runtime environment supports out-of-process log ingestion, +like logging agent, you can opt-in to write log entries to io.Writer instead of +ingesting them to Cloud Logging service. Usually, you will use os.Stdout or os.Stderr as +writers because Google Cloud logging agents are configured to capture logs from standard output. +The entries will be Jsonified and wrote as one line strings following the structured logging format. 
+See https://cloud.google.com/logging/docs/structured-logging#special-payload-fields for the format description. +To instruct Logger to redirect log entries add RedirectAsJSON() LoggerOption`s. -Payloads + // Create a logger to print structured logs formatted as a single line Json to stdout + loggger := client.Logger("test-log", RedirectAsJSON(os.Stdout)) + +# Payloads An entry payload can be a string, as in the examples above. It can also be any value that can be marshaled to a JSON object, like a map[string]interface{} or a struct: @@ -84,8 +92,18 @@ If you have a []byte of JSON, wrap it in json.RawMessage: j := []byte(`{"Name": "Bob", "Count": 3}`) lg.Log(logging.Entry{Payload: json.RawMessage(j)}) +If you have proto.Message and want to send it as a protobuf payload, marshal it to anypb.Any: + + // import + func logMessage (m proto.Message) { + var payload anypb.Any + err := anypb.MarshalFrom(&payload, m) + if err != nil { + lg.Log(logging.Entry{Payload: payload}) + } + } -The Standard Logger +# The Standard Logger You may want use a standard log.Logger in your program. @@ -93,8 +111,7 @@ You may want use a standard log.Logger in your program. stdlg := lg.StandardLogger(logging.Info) stdlg.Println("some info") - -Log Levels +# Log Levels An Entry may have one of a number of severity levels associated with it. @@ -103,8 +120,7 @@ An Entry may have one of a number of severity levels associated with it. Severity: logging.Critical, } - -Viewing Logs +# Viewing Logs You can view Cloud logs for projects at https://console.cloud.google.com/logs/viewer. Use the dropdown at the top left. When @@ -112,15 +128,14 @@ running from a Google Cloud Platform VM, select "GCE VM Instance". Otherwise, se "Google Project" and then the project ID. Logs for organizations, folders and billing accounts can be viewed on the command line with the "gcloud logging read" command. - -Grouping Logs by Request +# Grouping Logs by Request To group all the log entries written during a single HTTP request, create two Loggers, a "parent" and a "child," with different log IDs. Both should be in the same project, and have the same MonitoredResource type and labels. -- Parent entries must have HTTPRequest.Request (strictly speaking, only Method and URL are necessary), - and HTTPRequest.Status populated. + - Parent entries must have HTTPRequest.Request (strictly speaking, only Method and URL are necessary), + and HTTPRequest.Status populated. - A child entry's timestamp must be within the time interval covered by the parent request. (i.e., before the parent.Timestamp and after the parent.Timestamp - parent.HTTPRequest.Latency. This assumes the diff --git a/vendor/cloud.google.com/go/logging/instrumentation.go b/vendor/cloud.google.com/go/logging/instrumentation.go new file mode 100644 index 0000000000000..b8822e6d1695e --- /dev/null +++ b/vendor/cloud.google.com/go/logging/instrumentation.go @@ -0,0 +1,83 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
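The proto-payload example in the doc.go hunk above is easier to follow with anypb.MarshalFrom's full signature spelled out; this is an illustrative sketch only, assuming an existing *logging.Logger named lg and that the Logger accepts the *anypb.Any as its Payload, per the doc comment above. It logs only when marshaling succeeds:

    // Wrap an arbitrary proto.Message in an anypb.Any and send it as a protobuf payload.
    // Assumed imports:
    //   "google.golang.org/protobuf/proto"
    //   "google.golang.org/protobuf/types/known/anypb"
    func logProtoPayload(lg *logging.Logger, m proto.Message) {
    	var payload anypb.Any
    	if err := anypb.MarshalFrom(&payload, m, proto.MarshalOptions{}); err == nil {
    		lg.Log(logging.Entry{Payload: &payload})
    	}
    }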
+ +package logging + +import ( + "strings" + + logpb "cloud.google.com/go/logging/apiv2/loggingpb" + "cloud.google.com/go/logging/internal" +) + +const diagnosticLogID = "diagnostic-log" + +// instrumentationPayload defines telemetry log entry payload for capturing instrumentation info +type instrumentationPayload struct { + InstrumentationSource []map[string]string `json:"instrumentation_source"` + Runtime string `json:"runtime,omitempty"` +} + +var ( + instrumentationInfo = &instrumentationPayload{ + InstrumentationSource: []map[string]string{ + { + "name": "go", + "version": internal.Version, + }, + }, + Runtime: internal.VersionGo(), + } +) + +// instrumentLogs appends log entry with library instrumentation info to the +// list of log entries on the first function's call. +func (l *Logger) instrumentLogs(entries []*logpb.LogEntry) ([]*logpb.LogEntry, bool) { + var instrumentationAdded bool + + internal.InstrumentOnce.Do(func() { + ie, err := l.instrumentationEntry() + if err != nil { + // do not retry instrumenting logs if failed creating instrumentation entry + return + } + // populate LogName only when directly ingesting entries + if l.redirectOutputWriter == nil { + ie.LogName = internal.LogPath(l.client.parent, diagnosticLogID) + } + entries = append(entries, ie) + instrumentationAdded = true + }) + return entries, instrumentationAdded +} + +func (l *Logger) instrumentationEntry() (*logpb.LogEntry, error) { + ent := Entry{ + Payload: map[string]*instrumentationPayload{ + "logging.googleapis.com/diagnostic": instrumentationInfo, + }, + } + // pass nil for Logger and 0 for skip levels to ignore auto-population + return toLogEntryInternal(ent, nil, l.client.parent, 0) +} + +// hasInstrumentation returns true if any of the log entries has diagnostic LogId +func hasInstrumentation(entries []*logpb.LogEntry) bool { + for _, ent := range entries { + if strings.HasSuffix(ent.LogName, diagnosticLogID) { + return true + } + } + return false +} diff --git a/vendor/cloud.google.com/go/logging/internal/common.go b/vendor/cloud.google.com/go/logging/internal/common.go index c5788feb0b2c9..28801ac8663ab 100644 --- a/vendor/cloud.google.com/go/logging/internal/common.go +++ b/vendor/cloud.google.com/go/logging/internal/common.go @@ -16,7 +16,10 @@ package internal import ( "fmt" + "runtime" "strings" + "sync" + "unicode" ) const ( @@ -24,6 +27,9 @@ const ( ProdAddr = "logging.googleapis.com:443" ) +// InstrumentOnce guards instrumenting logs one time +var InstrumentOnce = new(sync.Once) + // LogPath creates a formatted path from a parent and a logID. func LogPath(parent, logID string) string { logID = strings.Replace(logID, "/", "%2F", -1) @@ -39,3 +45,40 @@ func LogIDFromPath(parent, path string) string { logID := path[start:] return strings.Replace(logID, "%2F", "/", -1) } + +// VersionGo returns the Go runtime version. The returned string +// has no whitespace, suitable for reporting in header. 
+func VersionGo() string { + const develPrefix = "devel +" + + s := runtime.Version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } + + notSemverRune := func(r rune) bool { + return !strings.ContainsRune("0123456789.", r) + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + s += "-" + prerelease + } + return s + } + return "UNKNOWN" +} diff --git a/vendor/cloud.google.com/go/logging/internal/environment.go b/vendor/cloud.google.com/go/logging/internal/environment.go new file mode 100644 index 0000000000000..d56d496852a55 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/internal/environment.go @@ -0,0 +1,75 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "io/ioutil" + "net" + "net/http" + "os" + "strings" + "time" + + "cloud.google.com/go/compute/metadata" +) + +// ResourceAttributesGetter abstracts environment lookup methods to query for environment variables, metadata attributes and file content. +type ResourceAttributesGetter interface { + EnvVar(name string) string + Metadata(path string) string + ReadAll(path string) string +} + +var getter ResourceAttributesGetter = &defaultResourceGetter{ + metaClient: metadata.NewClient(&http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 1 * time.Second, + KeepAlive: 10 * time.Second, + }).Dial, + }, + })} + +// ResourceAttributes provides read-only access to the ResourceAtttributesGetter interface implementation. +func ResourceAttributes() ResourceAttributesGetter { + return getter +} + +type defaultResourceGetter struct { + metaClient *metadata.Client +} + +// EnvVar uses os.LookupEnv() to lookup for environment variable by name. +func (g *defaultResourceGetter) EnvVar(name string) string { + return os.Getenv(name) +} + +// Metadata uses metadata package Client.Get() to lookup for metadata attributes by path. +func (g *defaultResourceGetter) Metadata(path string) string { + val, err := g.metaClient.Get(path) + if err != nil { + return "" + } + return strings.TrimSpace(val) +} + +// ReadAll reads all content of the file as a string. 
+func (g *defaultResourceGetter) ReadAll(path string) string { + bytes, err := ioutil.ReadFile(path) + if err != nil { + return "" + } + return string(bytes) +} diff --git a/vendor/cloud.google.com/go/logging/internal/version.go b/vendor/cloud.google.com/go/logging/internal/version.go new file mode 100644 index 0000000000000..7c251d9945593 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/internal/version.go @@ -0,0 +1,18 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +// Version is the current tagged release of the library. +const Version = "1.7.0" diff --git a/vendor/cloud.google.com/go/logging/loggeroption.go b/vendor/cloud.google.com/go/logging/loggeroption.go new file mode 100644 index 0000000000000..11cfbe88f6f86 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/loggeroption.go @@ -0,0 +1,189 @@ +// Copyright 2021 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logging + +import ( + "context" + "io" + "os" + "time" +) + +// LoggerOption is a configuration option for a Logger. +type LoggerOption interface { + set(*Logger) +} + +// CommonLabels are labels that apply to all log entries written from a Logger, +// so that you don't have to repeat them in each log entry's Labels field. If +// any of the log entries contains a (key, value) with the same key that is in +// CommonLabels, then the entry's (key, value) overrides the one in +// CommonLabels. +func CommonLabels(m map[string]string) LoggerOption { return commonLabels(m) } + +type commonLabels map[string]string + +func (c commonLabels) set(l *Logger) { l.commonLabels = c } + +// ConcurrentWriteLimit determines how many goroutines will send log entries to the +// underlying service. The default is 1. Set ConcurrentWriteLimit to a higher value to +// increase throughput. +func ConcurrentWriteLimit(n int) LoggerOption { return concurrentWriteLimit(n) } + +type concurrentWriteLimit int + +func (c concurrentWriteLimit) set(l *Logger) { l.bundler.HandlerLimit = int(c) } + +// DelayThreshold is the maximum amount of time that an entry should remain +// buffered in memory before a call to the logging service is triggered. Larger +// values of DelayThreshold will generally result in fewer calls to the logging +// service, while increasing the risk that log entries will be lost if the +// process crashes. +// The default is DefaultDelayThreshold. 
+func DelayThreshold(d time.Duration) LoggerOption { return delayThreshold(d) } + +type delayThreshold time.Duration + +func (d delayThreshold) set(l *Logger) { l.bundler.DelayThreshold = time.Duration(d) } + +// EntryCountThreshold is the maximum number of entries that will be buffered +// in memory before a call to the logging service is triggered. Larger values +// will generally result in fewer calls to the logging service, while +// increasing both memory consumption and the risk that log entries will be +// lost if the process crashes. +// The default is DefaultEntryCountThreshold. +func EntryCountThreshold(n int) LoggerOption { return entryCountThreshold(n) } + +type entryCountThreshold int + +func (e entryCountThreshold) set(l *Logger) { l.bundler.BundleCountThreshold = int(e) } + +// EntryByteThreshold is the maximum number of bytes of entries that will be +// buffered in memory before a call to the logging service is triggered. See +// EntryCountThreshold for a discussion of the tradeoffs involved in setting +// this option. +// The default is DefaultEntryByteThreshold. +func EntryByteThreshold(n int) LoggerOption { return entryByteThreshold(n) } + +type entryByteThreshold int + +func (e entryByteThreshold) set(l *Logger) { l.bundler.BundleByteThreshold = int(e) } + +// EntryByteLimit is the maximum number of bytes of entries that will be sent +// in a single call to the logging service. ErrOversizedEntry is returned if an +// entry exceeds EntryByteLimit. This option limits the size of a single RPC +// payload, to account for network or service issues with large RPCs. If +// EntryByteLimit is smaller than EntryByteThreshold, the latter has no effect. +// The default is zero, meaning there is no limit. +func EntryByteLimit(n int) LoggerOption { return entryByteLimit(n) } + +type entryByteLimit int + +func (e entryByteLimit) set(l *Logger) { l.bundler.BundleByteLimit = int(e) } + +// BufferedByteLimit is the maximum number of bytes that the Logger will keep +// in memory before returning ErrOverflow. This option limits the total memory +// consumption of the Logger (but note that each Logger has its own, separate +// limit). It is possible to reach BufferedByteLimit even if it is larger than +// EntryByteThreshold or EntryByteLimit, because calls triggered by the latter +// two options may be enqueued (and hence occupying memory) while new log +// entries are being added. +// The default is DefaultBufferedByteLimit. +func BufferedByteLimit(n int) LoggerOption { return bufferedByteLimit(n) } + +type bufferedByteLimit int + +func (b bufferedByteLimit) set(l *Logger) { l.bundler.BufferedByteLimit = int(b) } + +// ContextFunc is a function that will be called to obtain a context.Context for the +// WriteLogEntries RPC executed in the background for calls to Logger.Log. The +// default is a function that always returns context.Background. The second return +// value of the function is a function to call after the RPC completes. +// +// The function is not used for calls to Logger.LogSync, since the caller can pass +// in the context directly. +// +// This option is EXPERIMENTAL. It may be changed or removed. +func ContextFunc(f func() (ctx context.Context, afterCall func())) LoggerOption { + return contextFunc(f) +} + +type contextFunc func() (ctx context.Context, afterCall func()) + +func (c contextFunc) set(l *Logger) { l.ctxFunc = c } + +// SourceLocationPopulation is the flag controlling population of the source location info +// in the ingested entries. 
This options allows to configure automatic population of the +// SourceLocation field for all ingested entries, entries with DEBUG severity or disable it. +// Note that enabling this option can decrease execution time of Logger.Log and Logger.LogSync +// by the factor of 2 or larger. +// The default disables source location population. +// +// This option is not used when an entry is created using ToLogEntry. +func SourceLocationPopulation(f int) LoggerOption { + return sourceLocationOption(f) +} + +const ( + // DoNotPopulateSourceLocation is default for clients when WithSourceLocation is not provided + DoNotPopulateSourceLocation = 0 + // PopulateSourceLocationForDebugEntries is set when WithSourceLocation(PopulateDebugEntries) is provided + PopulateSourceLocationForDebugEntries = 1 + // AlwaysPopulateSourceLocation is set when WithSourceLocation(PopulateAllEntries) is provided + AlwaysPopulateSourceLocation = 2 +) + +type sourceLocationOption int + +func (o sourceLocationOption) set(l *Logger) { + if o == DoNotPopulateSourceLocation || o == PopulateSourceLocationForDebugEntries || o == AlwaysPopulateSourceLocation { + l.populateSourceLocation = int(o) + } +} + +// PartialSuccess sets the partialSuccess flag to true when ingesting a bundle of log entries. +// See https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/write#body.request_body.FIELDS.partial_success +// If not provided the partialSuccess flag is set to false. +func PartialSuccess() LoggerOption { + return &partialSuccessOption{} +} + +type partialSuccessOption struct{} + +func (o *partialSuccessOption) set(l *Logger) { + l.partialSuccess = true +} + +// RedirectAsJSON instructs Logger to redirect output of calls to Log and LogSync to provided io.Writer instead of ingesting +// to Cloud Logging. Logger formats log entries following logging agent's Json format. +// See https://cloud.google.com/logging/docs/structured-logging#special-payload-fields for more info about the format. +// Use this option to delegate log ingestion to an out-of-process logging agent. +// If no writer is provided, the redirect is set to stdout. 
+func RedirectAsJSON(w io.Writer) LoggerOption { + if w == nil { + w = os.Stdout + } + return &redirectOutputOption{ + writer: w, + } +} + +type redirectOutputOption struct { + writer io.Writer +} + +func (o *redirectOutputOption) set(l *Logger) { + l.redirectOutputWriter = o.writer +} diff --git a/vendor/cloud.google.com/go/logging/logging.go b/vendor/cloud.google.com/go/logging/logging.go index 81ede6afb0ce4..0763640a1454a 100644 --- a/vendor/cloud.google.com/go/logging/logging.go +++ b/vendor/cloud.google.com/go/logging/logging.go @@ -30,17 +30,19 @@ import ( "encoding/json" "errors" "fmt" + "io" "log" "net/http" "regexp" + "runtime" "strconv" "strings" "sync" "time" "unicode/utf8" - "cloud.google.com/go/internal/version" vkit "cloud.google.com/go/logging/apiv2" + logpb "cloud.google.com/go/logging/apiv2/loggingpb" "cloud.google.com/go/logging/internal" "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" @@ -49,7 +51,8 @@ import ( "google.golang.org/api/support/bundler" mrpb "google.golang.org/genproto/googleapis/api/monitoredres" logtypepb "google.golang.org/genproto/googleapis/logging/type" - logpb "google.golang.org/genproto/googleapis/logging/v2" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -75,7 +78,7 @@ const ( DefaultEntryCountThreshold = 1000 // DefaultEntryByteThreshold is the default value for the EntryByteThreshold LoggerOption. - DefaultEntryByteThreshold = 1 << 20 // 1MiB + DefaultEntryByteThreshold = 1 << 23 // 8MiB // DefaultBufferedByteLimit is the default value for the BufferedByteLimit LoggerOption. DefaultBufferedByteLimit = 1 << 30 // 1GiB @@ -87,16 +90,23 @@ const ( defaultWriteTimeout = 10 * time.Minute ) -// For testing: -var now = time.Now +var ( + // ErrRedirectProtoPayloadNotSupported is returned when Logger is configured to redirect output and + // tries to redirect logs with protobuf payload. + ErrRedirectProtoPayloadNotSupported = errors.New("printEntryToStdout: cannot find valid payload") -// ErrOverflow signals that the number of buffered entries for a Logger -// exceeds its BufferLimit. -var ErrOverflow = bundler.ErrOverflow + // For testing: + now = time.Now + toLogEntryInternal = toLogEntryInternalImpl -// ErrOversizedEntry signals that an entry's size exceeds the maximum number of -// bytes that will be sent in a single call to the logging service. -var ErrOversizedEntry = bundler.ErrOversizedItem + // ErrOverflow signals that the number of buffered entries for a Logger + // exceeds its BufferLimit. + ErrOverflow = bundler.ErrOverflow + + // ErrOversizedEntry signals that an entry's size exceeds the maximum number of + // bytes that will be sent in a single call to the logging service. + ErrOversizedEntry = bundler.ErrOversizedItem +) // Client is a Logging client. A Client is associated with a single Cloud project. type Client struct { @@ -125,17 +135,22 @@ type Client struct { // NewClient returns a new logging client associated with the provided parent. // A parent can take any of the following forms: -// projects/PROJECT_ID -// folders/FOLDER_ID -// billingAccounts/ACCOUNT_ID -// organizations/ORG_ID +// +// projects/PROJECT_ID +// folders/FOLDER_ID +// billingAccounts/ACCOUNT_ID +// organizations/ORG_ID +// // for backwards compatibility, a string with no '/' is also allowed and is interpreted // as a project ID. // // By default NewClient uses WriteScope. 
To use a different scope, call // NewClient using a WithScopes option (see https://godoc.org/google.golang.org/api/option#WithScopes). func NewClient(ctx context.Context, parent string, opts ...option.ClientOption) (*Client, error) { - parent = makeParent(parent) + parent, err := makeParent(parent) + if err != nil { + return nil, err + } opts = append([]option.ClientOption{ option.WithScopes(WriteScope), }, opts...) @@ -143,7 +158,7 @@ func NewClient(ctx context.Context, parent string, opts ...option.ClientOption) if err != nil { return nil, err } - c.SetGoogleClientInfo("gccl", version.Repo) + c.SetGoogleClientInfo("gccl", internal.Version) client := &Client{ client: c, parent: parent, @@ -168,11 +183,15 @@ func NewClient(ctx context.Context, parent string, opts ...option.ClientOption) return client, nil } -func makeParent(parent string) string { +func makeParent(parent string) (string, error) { if !strings.ContainsRune(parent, '/') { - return "projects/" + parent + return "projects/" + parent, nil + } + prefix := strings.Split(parent, "/")[0] + if prefix != "projects" && prefix != "folders" && prefix != "billingAccounts" && prefix != "organizations" { + return parent, fmt.Errorf("parent parameter must start with 'projects/' 'folders/' 'billingAccounts/' or 'organizations/'") } - return parent + return parent, nil } // Ping reports whether the client's connection to the logging service and the @@ -213,7 +232,7 @@ func (c *Client) extractErrorInfo() error { var err error c.mu.Lock() if c.lastErr != nil { - err = fmt.Errorf("saw %d errors; last: %v", c.nErrs, c.lastErr) + err = fmt.Errorf("saw %d errors; last: %w", c.nErrs, c.lastErr) c.nErrs = 0 c.lastErr = nil } @@ -230,114 +249,14 @@ type Logger struct { bundler *bundler.Bundler // Options - commonResource *mrpb.MonitoredResource - commonLabels map[string]string - ctxFunc func() (context.Context, func()) -} - -// A LoggerOption is a configuration option for a Logger. -type LoggerOption interface { - set(*Logger) -} - -// CommonLabels are labels that apply to all log entries written from a Logger, -// so that you don't have to repeat them in each log entry's Labels field. If -// any of the log entries contains a (key, value) with the same key that is in -// CommonLabels, then the entry's (key, value) overrides the one in -// CommonLabels. -func CommonLabels(m map[string]string) LoggerOption { return commonLabels(m) } - -type commonLabels map[string]string - -func (c commonLabels) set(l *Logger) { l.commonLabels = c } - -// ConcurrentWriteLimit determines how many goroutines will send log entries to the -// underlying service. The default is 1. Set ConcurrentWriteLimit to a higher value to -// increase throughput. -func ConcurrentWriteLimit(n int) LoggerOption { return concurrentWriteLimit(n) } - -type concurrentWriteLimit int - -func (c concurrentWriteLimit) set(l *Logger) { l.bundler.HandlerLimit = int(c) } - -// DelayThreshold is the maximum amount of time that an entry should remain -// buffered in memory before a call to the logging service is triggered. Larger -// values of DelayThreshold will generally result in fewer calls to the logging -// service, while increasing the risk that log entries will be lost if the -// process crashes. -// The default is DefaultDelayThreshold. 
-func DelayThreshold(d time.Duration) LoggerOption { return delayThreshold(d) } - -type delayThreshold time.Duration - -func (d delayThreshold) set(l *Logger) { l.bundler.DelayThreshold = time.Duration(d) } - -// EntryCountThreshold is the maximum number of entries that will be buffered -// in memory before a call to the logging service is triggered. Larger values -// will generally result in fewer calls to the logging service, while -// increasing both memory consumption and the risk that log entries will be -// lost if the process crashes. -// The default is DefaultEntryCountThreshold. -func EntryCountThreshold(n int) LoggerOption { return entryCountThreshold(n) } - -type entryCountThreshold int - -func (e entryCountThreshold) set(l *Logger) { l.bundler.BundleCountThreshold = int(e) } - -// EntryByteThreshold is the maximum number of bytes of entries that will be -// buffered in memory before a call to the logging service is triggered. See -// EntryCountThreshold for a discussion of the tradeoffs involved in setting -// this option. -// The default is DefaultEntryByteThreshold. -func EntryByteThreshold(n int) LoggerOption { return entryByteThreshold(n) } - -type entryByteThreshold int - -func (e entryByteThreshold) set(l *Logger) { l.bundler.BundleByteThreshold = int(e) } - -// EntryByteLimit is the maximum number of bytes of entries that will be sent -// in a single call to the logging service. ErrOversizedEntry is returned if an -// entry exceeds EntryByteLimit. This option limits the size of a single RPC -// payload, to account for network or service issues with large RPCs. If -// EntryByteLimit is smaller than EntryByteThreshold, the latter has no effect. -// The default is zero, meaning there is no limit. -func EntryByteLimit(n int) LoggerOption { return entryByteLimit(n) } - -type entryByteLimit int - -func (e entryByteLimit) set(l *Logger) { l.bundler.BundleByteLimit = int(e) } - -// BufferedByteLimit is the maximum number of bytes that the Logger will keep -// in memory before returning ErrOverflow. This option limits the total memory -// consumption of the Logger (but note that each Logger has its own, separate -// limit). It is possible to reach BufferedByteLimit even if it is larger than -// EntryByteThreshold or EntryByteLimit, because calls triggered by the latter -// two options may be enqueued (and hence occupying memory) while new log -// entries are being added. -// The default is DefaultBufferedByteLimit. -func BufferedByteLimit(n int) LoggerOption { return bufferedByteLimit(n) } - -type bufferedByteLimit int - -func (b bufferedByteLimit) set(l *Logger) { l.bundler.BufferedByteLimit = int(b) } - -// ContextFunc is a function that will be called to obtain a context.Context for the -// WriteLogEntries RPC executed in the background for calls to Logger.Log. The -// default is a function that always returns context.Background. The second return -// value of the function is a function to call after the RPC completes. -// -// The function is not used for calls to Logger.LogSync, since the caller can pass -// in the context directly. -// -// This option is EXPERIMENTAL. It may be changed or removed. 
-func ContextFunc(f func() (ctx context.Context, afterCall func())) LoggerOption { - return contextFunc(f) + commonResource *mrpb.MonitoredResource + commonLabels map[string]string + ctxFunc func() (context.Context, func()) + populateSourceLocation int + partialSuccess bool + redirectOutputWriter io.Writer } -type contextFunc func() (ctx context.Context, afterCall func()) - -func (c contextFunc) set(l *Logger) { l.ctxFunc = c } - // Logger returns a Logger that will write entries with the given log ID, such as // "syslog". A log ID must be less than 512 characters long and can only // include the following characters: upper and lower case alphanumeric @@ -349,10 +268,13 @@ func (c *Client) Logger(logID string, opts ...LoggerOption) *Logger { r = monitoredResource(c.parent) } l := &Logger{ - client: c, - logName: internal.LogPath(c.parent, logID), - commonResource: r, - ctxFunc: func() (context.Context, func()) { return context.Background(), nil }, + client: c, + logName: internal.LogPath(c.parent, logID), + commonResource: r, + ctxFunc: func() (context.Context, func()) { return context.Background(), nil }, + populateSourceLocation: DoNotPopulateSourceLocation, + partialSuccess: false, + redirectOutputWriter: nil, } l.bundler = bundler.NewBundler(&logpb.LogEntry{}, func(entries interface{}) { l.writeLogEntries(entries.([]*logpb.LogEntry)) @@ -366,7 +288,8 @@ func (c *Client) Logger(logID string, opts ...LoggerOption) *Logger { } l.stdLoggers = map[Severity]*log.Logger{} for s := range severityName { - l.stdLoggers[s] = log.New(severityWriter{l, s}, "", 0) + e := Entry{Severity: s} + l.stdLoggers[s] = log.New(templateEntryWriter{l, &e}, "", 0) } c.loggers.Add(1) @@ -380,16 +303,20 @@ func (c *Client) Logger(logID string, opts ...LoggerOption) *Logger { return l } -type severityWriter struct { - l *Logger - s Severity +type templateEntryWriter struct { + l *Logger + template *Entry } -func (w severityWriter) Write(p []byte) (n int, err error) { - w.l.Log(Entry{ - Severity: w.s, - Payload: string(p), - }) +func (w templateEntryWriter) Write(p []byte) (n int, err error) { + e := *w.template + e.Payload = string(p) + // The second argument to logInternal() is how many frames to skip + // from the call stack when determining the source location. In the + // current implementation of log.Logger (i.e. Go's logging library) + // the Write() method is called 2 calls deep so we need to skip 3 + // frames to account for the call to logInternal() itself. + w.l.logInternal(e, 3) return len(p), nil } @@ -464,10 +391,14 @@ func (v Severity) String() string { // Severity. 
func (v *Severity) UnmarshalJSON(data []byte) error { var s string - if err := json.Unmarshal(data, &s); err != nil { - return err + var i int + if strErr := json.Unmarshal(data, &s); strErr == nil { + *v = ParseSeverity(s) + } else if intErr := json.Unmarshal(data, &i); intErr == nil { + *v = Severity(i) + } else { + return fmt.Errorf("%v; %v", strErr, intErr) } - *v = ParseSeverity(s) return nil } @@ -659,13 +590,13 @@ func toProtoStruct(v interface{}) (*structpb.Struct, error) { } else { jb, err = json.Marshal(v) if err != nil { - return nil, fmt.Errorf("logging: json.Marshal: %v", err) + return nil, fmt.Errorf("logging: json.Marshal: %w", err) } } var m map[string]interface{} err = json.Unmarshal(jb, &m) if err != nil { - return nil, fmt.Errorf("logging: json.Unmarshal: %v", err) + return nil, fmt.Errorf("logging: json.Unmarshal: %w", err) } return jsonMapToProtoStruct(m), nil } @@ -705,28 +636,56 @@ func jsonValueToStructValue(v interface{}) *structpb.Value { // and will block, it is intended primarily for debugging or critical errors. // Prefer Log for most uses. func (l *Logger) LogSync(ctx context.Context, e Entry) error { - ent, err := toLogEntryInternal(e, l.client, l.client.parent) + ent, err := toLogEntryInternal(e, l, l.client.parent, 1) if err != nil { return err } + entries, hasInstrumentation := l.instrumentLogs([]*logpb.LogEntry{ent}) + if l.redirectOutputWriter != nil { + for _, ent = range entries { + err = serializeEntryToWriter(ent, l.redirectOutputWriter) + if err != nil { + break + } + } + return err + } _, err = l.client.client.WriteLogEntries(ctx, &logpb.WriteLogEntriesRequest{ - LogName: l.logName, - Resource: l.commonResource, - Labels: l.commonLabels, - Entries: []*logpb.LogEntry{ent}, + LogName: l.logName, + Resource: l.commonResource, + Labels: l.commonLabels, + Entries: entries, + PartialSuccess: l.partialSuccess || hasInstrumentation, }) return err } // Log buffers the Entry for output to the logging service. It never blocks. func (l *Logger) Log(e Entry) { - ent, err := toLogEntryInternal(e, l.client, l.client.parent) + l.logInternal(e, 1) +} + +func (l *Logger) logInternal(e Entry, skipLevels int) { + ent, err := toLogEntryInternal(e, l, l.client.parent, skipLevels+1) if err != nil { l.client.error(err) return } - if err := l.bundler.Add(ent, proto.Size(ent)); err != nil { - l.client.error(err) + + entries, _ := l.instrumentLogs([]*logpb.LogEntry{ent}) + if l.redirectOutputWriter != nil { + for _, ent = range entries { + err = serializeEntryToWriter(ent, l.redirectOutputWriter) + if err != nil { + l.client.error(err) + } + } + return + } + for _, ent = range entries { + if err := l.bundler.Add(ent, proto.Size(ent)); err != nil { + l.client.error(err) + } } } @@ -742,11 +701,16 @@ func (l *Logger) Flush() error { } func (l *Logger) writeLogEntries(entries []*logpb.LogEntry) { + partialSuccess := l.partialSuccess + if len(entries) > 1 { + partialSuccess = partialSuccess || hasInstrumentation(entries) + } req := &logpb.WriteLogEntriesRequest{ - LogName: l.logName, - Resource: l.commonResource, - Labels: l.commonLabels, - Entries: entries, + LogName: l.logName, + Resource: l.commonResource, + Labels: l.commonLabels, + Entries: entries, + PartialSuccess: partialSuccess, } ctx, afterCall := l.ctxFunc() ctx, cancel := context.WithTimeout(ctx, defaultWriteTimeout) @@ -767,7 +731,73 @@ func (l *Logger) writeLogEntries(entries []*logpb.LogEntry) { // (for example by calling SetFlags or SetPrefix). 
func (l *Logger) StandardLogger(s Severity) *log.Logger { return l.stdLoggers[s] } -var reCloudTraceContext = regexp.MustCompile( +// StandardLoggerFromTemplate returns a Go Standard Logging API *log.Logger. +// +// The returned logger emits logs using logging.(*Logger).Log() with an entry +// constructed from the provided template Entry struct. +// +// The caller is responsible for ensuring that the template Entry struct +// does not change during the the lifetime of the returned *log.Logger. +// +// Prefer (*Logger).StandardLogger() which is more efficient if the template +// only sets Severity. +func (l *Logger) StandardLoggerFromTemplate(template *Entry) *log.Logger { + return log.New(templateEntryWriter{l, template}, "", 0) +} + +func populateTraceInfo(e *Entry, req *http.Request) bool { + if req == nil { + if e.HTTPRequest != nil && e.HTTPRequest.Request != nil { + req = e.HTTPRequest.Request + } else { + return false + } + } + header := req.Header.Get("Traceparent") + if header != "" { + // do not use traceSampled flag defined by traceparent because + // flag's definition differs from expected by Cloud Tracing + traceID, spanID, _ := deconstructTraceParent(header) + if traceID != "" { + e.Trace = traceID + e.SpanID = spanID + return true + } + } + header = req.Header.Get("X-Cloud-Trace-Context") + if header != "" { + traceID, spanID, traceSampled := deconstructXCloudTraceContext(header) + if traceID != "" { + e.Trace = traceID + e.SpanID = spanID + // enforce sampling if required + e.TraceSampled = e.TraceSampled || traceSampled + return true + } + } + return false +} + +// As per format described at https://www.w3.org/TR/trace-context/#traceparent-header-field-values +var validTraceParentExpression = regexp.MustCompile(`^(00)-([a-fA-F\d]{32})-([a-f\d]{16})-([a-fA-F\d]{2})$`) + +func deconstructTraceParent(s string) (traceID, spanID string, traceSampled bool) { + matches := validTraceParentExpression.FindStringSubmatch(s) + if matches != nil { + // regexp package does not support negative lookahead preventing all 0 validations + if matches[2] == "00000000000000000000000000000000" || matches[3] == "0000000000000000" { + return + } + flags, err := strconv.ParseInt(matches[4], 16, 16) + if err == nil { + traceSampled = (flags & 0x01) == 1 + } + traceID, spanID = matches[2], matches[3] + } + return +} + +var validXCloudTraceContext = regexp.MustCompile( // Matches on "TRACE_ID" `([a-f\d]+)?` + // Matches on "/SPAN_ID" @@ -785,9 +815,11 @@ func deconstructXCloudTraceContext(s string) (traceID, spanID string, traceSampl // * traceID (optional): "105445aa7843bc8bf206b120001000" // * spanID (optional): "1" // * traceSampled (optional): true - matches := reCloudTraceContext.FindStringSubmatch(s) + matches := validXCloudTraceContext.FindStringSubmatch(s) - traceID, spanID, traceSampled = matches[1], matches[2], matches[3] == "1" + if matches != nil { + traceID, spanID, traceSampled = matches[1], matches[2], matches[3] == "1" + } if spanID == "0" { spanID = "" @@ -798,10 +830,12 @@ func deconstructXCloudTraceContext(s string) (traceID, spanID string, traceSampl // ToLogEntry takes an Entry structure and converts it to the LogEntry proto. 
// A parent can take any of the following forms: -// projects/PROJECT_ID -// folders/FOLDER_ID -// billingAccounts/ACCOUNT_ID -// organizations/ORG_ID +// +// projects/PROJECT_ID +// folders/FOLDER_ID +// billingAccounts/ACCOUNT_ID +// organizations/ORG_ID +// // for backwards compatibility, a string with no '/' is also allowed and is interpreted // as a project ID. // @@ -811,11 +845,20 @@ func deconstructXCloudTraceContext(s string) (traceID, spanID string, traceSampl // Logger.LogSync are used, it is intended to be used together with direct call // to WriteLogEntries method. func ToLogEntry(e Entry, parent string) (*logpb.LogEntry, error) { - // We have this method to support logging agents that need a bigger flexibility. - return toLogEntryInternal(e, nil, makeParent(parent)) + var l Logger + return l.ToLogEntry(e, parent) } -func toLogEntryInternal(e Entry, client *Client, parent string) (*logpb.LogEntry, error) { +// ToLogEntry for Logger instance +func (l *Logger) ToLogEntry(e Entry, parent string) (*logpb.LogEntry, error) { + parent, err := makeParent(parent) + if err != nil { + return nil, err + } + return toLogEntryInternal(e, l, parent, 1) +} + +func toLogEntryInternalImpl(e Entry, l *Logger, parent string, skipLevels int) (*logpb.LogEntry, error) { if e.LogName != "" { return nil, errors.New("logging: Entry.LogName should be not be set when writing") } @@ -823,33 +866,34 @@ func toLogEntryInternal(e Entry, client *Client, parent string) (*logpb.LogEntry if t.IsZero() { t = now() } - ts, err := ptypes.TimestampProto(t) - if err != nil { - return nil, err - } - if e.Trace == "" && e.HTTPRequest != nil && e.HTTPRequest.Request != nil { - traceHeader := e.HTTPRequest.Request.Header.Get("X-Cloud-Trace-Context") - if traceHeader != "" { - // Set to a relative resource name, as described at - // https://cloud.google.com/appengine/docs/flexible/go/writing-application-logs. - traceID, spanID, traceSampled := deconstructXCloudTraceContext(traceHeader) - if traceID != "" { - e.Trace = fmt.Sprintf("%s/traces/%s", parent, traceID) - } - if e.SpanID == "" { - e.SpanID = spanID + ts := timestamppb.New(t) + if l != nil && l.populateSourceLocation != DoNotPopulateSourceLocation && e.SourceLocation == nil { + if l.populateSourceLocation == AlwaysPopulateSourceLocation || + l.populateSourceLocation == PopulateSourceLocationForDebugEntries && e.Severity == Severity(Debug) { + // filename and line are captured for source code that calls + // skipLevels up the goroutine calling stack + 1 for this func. 
+ pc, file, line, ok := runtime.Caller(skipLevels + 1) + if ok { + details := runtime.FuncForPC(pc) + e.SourceLocation = &logpb.LogEntrySourceLocation{ + File: file, + Function: details.Name(), + Line: int64(line), + } } - - // If we previously hadn't set TraceSampled, let's retrieve it - // from the HTTP request's header, as per: - // https://cloud.google.com/trace/docs/troubleshooting#force-trace - e.TraceSampled = e.TraceSampled || traceSampled + } + } + if e.Trace == "" { + populateTraceInfo(&e, nil) + // format trace + if e.Trace != "" && !strings.Contains(e.Trace, "/traces/") { + e.Trace = fmt.Sprintf("%s/traces/%s", parent, e.Trace) } } req, err := fromHTTPRequest(e.HTTPRequest) if err != nil { - if client != nil { - client.error(err) + if l != nil && l.client != nil { + l.client.error(err) } else { return nil, err } @@ -870,6 +914,8 @@ func toLogEntryInternal(e Entry, client *Client, parent string) (*logpb.LogEntry switch p := e.Payload.(type) { case string: ent.Payload = &logpb.LogEntry_TextPayload{TextPayload: p} + case *anypb.Any: + ent.Payload = &logpb.LogEntry_ProtoPayload{ProtoPayload: p} default: s, err := toProtoStruct(p) if err != nil { @@ -879,3 +925,83 @@ func toLogEntryInternal(e Entry, client *Client, parent string) (*logpb.LogEntry } return ent, nil } + +// entry represents the fields of a logging.Entry that can be parsed by Logging agent. +// See the mappings at https://cloud.google.com/logging/docs/structured-logging#special-payload-fields +type structuredLogEntry struct { + // JsonMessage map[string]interface{} `json:"message,omitempty"` + // TextMessage string `json:"message,omitempty"` + Message json.RawMessage `json:"message"` + Severity string `json:"severity,omitempty"` + HTTPRequest *logtypepb.HttpRequest `json:"httpRequest,omitempty"` + Timestamp string `json:"timestamp,omitempty"` + Labels map[string]string `json:"logging.googleapis.com/labels,omitempty"` + InsertID string `json:"logging.googleapis.com/insertId,omitempty"` + Operation *logpb.LogEntryOperation `json:"logging.googleapis.com/operation,omitempty"` + SourceLocation *logpb.LogEntrySourceLocation `json:"logging.googleapis.com/sourceLocation,omitempty"` + SpanID string `json:"logging.googleapis.com/spanId,omitempty"` + Trace string `json:"logging.googleapis.com/trace,omitempty"` + TraceSampled bool `json:"logging.googleapis.com/trace_sampled,omitempty"` +} + +func convertSnakeToMixedCase(snakeStr string) string { + words := strings.Split(snakeStr, "_") + mixedStr := words[0] + for _, word := range words[1:] { + mixedStr += strings.Title(word) + } + return mixedStr +} + +func (s structuredLogEntry) MarshalJSON() ([]byte, error) { + // extract structuredLogEntry into json map + type Alias structuredLogEntry + var mapData map[string]interface{} + data, err := json.Marshal(Alias(s)) + if err == nil { + err = json.Unmarshal(data, &mapData) + } + if err == nil { + // ensure all inner dicts use mixed case instead of snake case + innerDicts := [3]string{"httpRequest", "logging.googleapis.com/operation", "logging.googleapis.com/sourceLocation"} + for _, field := range innerDicts { + if fieldData, ok := mapData[field]; ok { + formattedFieldData := make(map[string]interface{}) + for k, v := range fieldData.(map[string]interface{}) { + formattedFieldData[convertSnakeToMixedCase(k)] = v + } + mapData[field] = formattedFieldData + } + } + // serialize json map into raw bytes + return json.Marshal(mapData) + } + return data, err +} + +func serializeEntryToWriter(entry *logpb.LogEntry, w io.Writer) error { + 
jsonifiedEntry := structuredLogEntry{ + Severity: entry.Severity.String(), + HTTPRequest: entry.HttpRequest, + Timestamp: entry.Timestamp.String(), + Labels: entry.Labels, + InsertID: entry.InsertId, + Operation: entry.Operation, + SourceLocation: entry.SourceLocation, + SpanID: entry.SpanId, + Trace: entry.Trace, + TraceSampled: entry.TraceSampled, + } + var err error + if entry.GetTextPayload() != "" { + jsonifiedEntry.Message, err = json.Marshal(entry.GetTextPayload()) + } else if entry.GetJsonPayload() != nil { + jsonifiedEntry.Message, err = json.Marshal(entry.GetJsonPayload().AsMap()) + } else { + return ErrRedirectProtoPayloadNotSupported + } + if err == nil { + err = json.NewEncoder(w).Encode(jsonifiedEntry) + } + return err +} diff --git a/vendor/cloud.google.com/go/logging/resource.go b/vendor/cloud.google.com/go/logging/resource.go index b73b289072a9d..f8416e436c268 100644 --- a/vendor/cloud.google.com/go/logging/resource.go +++ b/vendor/cloud.google.com/go/logging/resource.go @@ -15,12 +15,11 @@ package logging import ( - "io/ioutil" - "os" + "runtime" "strings" "sync" - "cloud.google.com/go/compute/metadata" + "cloud.google.com/go/logging/internal" mrpb "google.golang.org/genproto/googleapis/api/monitoredres" ) @@ -34,200 +33,224 @@ type commonResource struct{ *mrpb.MonitoredResource } func (r commonResource) set(l *Logger) { l.commonResource = r.MonitoredResource } -var detectedResource struct { - pb *mrpb.MonitoredResource - once sync.Once +type resource struct { + pb *mrpb.MonitoredResource + attrs internal.ResourceAttributesGetter + once *sync.Once } -// isAppEngine returns true for both standard and flex -func isAppEngine() bool { - _, service := os.LookupEnv("GAE_SERVICE") - _, version := os.LookupEnv("GAE_VERSION") - _, instance := os.LookupEnv("GAE_INSTANCE") +var detectedResource = &resource{ + attrs: internal.ResourceAttributes(), + once: new(sync.Once), +} - return service && version && instance +func (r *resource) metadataProjectID() string { + return r.attrs.Metadata("project/project-id") } -func detectAppEngineResource() *mrpb.MonitoredResource { - projectID, err := metadata.ProjectID() - if err != nil { - return nil +func (r *resource) metadataZone() string { + zone := r.attrs.Metadata("instance/zone") + if zone != "" { + return zone[strings.LastIndex(zone, "/")+1:] + } + return "" +} + +func (r *resource) metadataRegion() string { + region := r.attrs.Metadata("instance/region") + if region != "" { + return region[strings.LastIndex(region, "/")+1:] } + return "" +} + +// isMetadataActive queries valid response on "/computeMetadata/v1/" URL +func (r *resource) isMetadataActive() bool { + data := r.attrs.Metadata("") + return data != "" +} + +// isAppEngine returns true for both standard and flex +func (r *resource) isAppEngine() bool { + service := r.attrs.EnvVar("GAE_SERVICE") + version := r.attrs.EnvVar("GAE_VERSION") + instance := r.attrs.EnvVar("GAE_INSTANCE") + return service != "" && version != "" && instance != "" +} + +func detectAppEngineResource() *mrpb.MonitoredResource { + projectID := detectedResource.metadataProjectID() if projectID == "" { - projectID = os.Getenv("GOOGLE_CLOUD_PROJECT") + projectID = detectedResource.attrs.EnvVar("GOOGLE_CLOUD_PROJECT") } - zone, err := metadata.Zone() - if err != nil { + if projectID == "" { return nil } + zone := detectedResource.metadataZone() + service := detectedResource.attrs.EnvVar("GAE_SERVICE") + version := detectedResource.attrs.EnvVar("GAE_VERSION") return &mrpb.MonitoredResource{ Type: "gae_app", 
Labels: map[string]string{ - "project_id": projectID, - "module_id": os.Getenv("GAE_SERVICE"), - "version_id": os.Getenv("GAE_VERSION"), - "instance_id": os.Getenv("GAE_INSTANCE"), - "runtime": os.Getenv("GAE_RUNTIME"), - "zone": zone, + "project_id": projectID, + "module_id": service, + "version_id": version, + "zone": zone, }, } } -func isCloudFunction() bool { - // Reserved envvars in older function runtimes, e.g. Node.js 8, Python 3.7 and Go 1.11. - _, name := os.LookupEnv("FUNCTION_NAME") - _, region := os.LookupEnv("FUNCTION_REGION") - _, entry := os.LookupEnv("ENTRY_POINT") - - // Reserved envvars in newer function runtimes. - _, target := os.LookupEnv("FUNCTION_TARGET") - _, signature := os.LookupEnv("FUNCTION_SIGNATURE_TYPE") - _, service := os.LookupEnv("K_SERVICE") - return (name && region && entry) || (target && signature && service) +func (r *resource) isCloudFunction() bool { + target := r.attrs.EnvVar("FUNCTION_TARGET") + signature := r.attrs.EnvVar("FUNCTION_SIGNATURE_TYPE") + // note that this envvar is also present in Cloud Run environments + service := r.attrs.EnvVar("K_SERVICE") + return target != "" && signature != "" && service != "" } func detectCloudFunction() *mrpb.MonitoredResource { - projectID, err := metadata.ProjectID() - if err != nil { - return nil - } - zone, err := metadata.Zone() - if err != nil { + projectID := detectedResource.metadataProjectID() + if projectID == "" { return nil } - // Newer functions runtimes store name in K_SERVICE. - functionName, exists := os.LookupEnv("K_SERVICE") - if !exists { - functionName, _ = os.LookupEnv("FUNCTION_NAME") - } + region := detectedResource.metadataRegion() + functionName := detectedResource.attrs.EnvVar("K_SERVICE") return &mrpb.MonitoredResource{ Type: "cloud_function", Labels: map[string]string{ "project_id": projectID, - "region": regionFromZone(zone), + "region": region, "function_name": functionName, }, } } -func isCloudRun() bool { - _, config := os.LookupEnv("K_CONFIGURATION") - _, service := os.LookupEnv("K_SERVICE") - _, revision := os.LookupEnv("K_REVISION") - return config && service && revision +func (r *resource) isCloudRun() bool { + config := r.attrs.EnvVar("K_CONFIGURATION") + // note that this envvar is also present in Cloud Function environments + service := r.attrs.EnvVar("K_SERVICE") + revision := r.attrs.EnvVar("K_REVISION") + return config != "" && service != "" && revision != "" } func detectCloudRunResource() *mrpb.MonitoredResource { - projectID, err := metadata.ProjectID() - if err != nil { - return nil - } - zone, err := metadata.Zone() - if err != nil { + projectID := detectedResource.metadataProjectID() + if projectID == "" { return nil } + region := detectedResource.metadataRegion() + config := detectedResource.attrs.EnvVar("K_CONFIGURATION") + service := detectedResource.attrs.EnvVar("K_SERVICE") + revision := detectedResource.attrs.EnvVar("K_REVISION") return &mrpb.MonitoredResource{ Type: "cloud_run_revision", Labels: map[string]string{ "project_id": projectID, - "location": regionFromZone(zone), - "service_name": os.Getenv("K_SERVICE"), - "revision_name": os.Getenv("K_REVISION"), - "configuration_name": os.Getenv("K_CONFIGURATION"), + "location": region, + "service_name": service, + "revision_name": revision, + "configuration_name": config, }, } } -func isKubernetesEngine() bool { - clusterName, err := metadata.InstanceAttributeValue("cluster-name") - // Note: InstanceAttributeValue can return "", nil - if err != nil || clusterName == "" { +func (r *resource) 
isKubernetesEngine() bool { + clusterName := r.attrs.Metadata("instance/attributes/cluster-name") + if clusterName == "" { return false } return true } func detectKubernetesResource() *mrpb.MonitoredResource { - projectID, err := metadata.ProjectID() - if err != nil { - return nil - } - zone, err := metadata.Zone() - if err != nil { - return nil - } - clusterName, err := metadata.InstanceAttributeValue("cluster-name") - if err != nil { + projectID := detectedResource.metadataProjectID() + if projectID == "" { return nil } - namespaceBytes, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") - namespaceName := "" - if err == nil { - namespaceName = string(namespaceBytes) + zone := detectedResource.metadataZone() + clusterName := detectedResource.attrs.Metadata("instance/attributes/cluster-name") + namespaceName := detectedResource.attrs.ReadAll("/var/run/secrets/kubernetes.io/serviceaccount/namespace") + if namespaceName == "" { + // if automountServiceAccountToken is disabled allow to customize + // the namespace via environment + namespaceName = detectedResource.attrs.EnvVar("NAMESPACE_NAME") } + // note: if deployment customizes hostname, HOSTNAME envvar will have invalid content + podName := detectedResource.attrs.EnvVar("HOSTNAME") + // there is no way to derive container name from within container; use custom envvar if available + containerName := detectedResource.attrs.EnvVar("CONTAINER_NAME") return &mrpb.MonitoredResource{ Type: "k8s_container", Labels: map[string]string{ "cluster_name": clusterName, "location": zone, "project_id": projectID, - "pod_name": os.Getenv("HOSTNAME"), + "pod_name": podName, "namespace_name": namespaceName, - // To get the `container_name` label, users need to explicitly provide it. - "container_name": os.Getenv("CONTAINER_NAME"), + "container_name": containerName, }, } } -func detectGCEResource() *mrpb.MonitoredResource { - projectID, err := metadata.ProjectID() - if err != nil { - return nil - } - id, err := metadata.InstanceID() - if err != nil { - return nil - } - zone, err := metadata.Zone() - if err != nil { - return nil - } - name, err := metadata.InstanceName() - if err != nil { +func (r *resource) isComputeEngine() bool { + preempted := r.attrs.Metadata("instance/preempted") + platform := r.attrs.Metadata("instance/cpu-platform") + appBucket := r.attrs.Metadata("instance/attributes/gae_app_bucket") + return preempted != "" && platform != "" && appBucket == "" +} + +func detectComputeEngineResource() *mrpb.MonitoredResource { + projectID := detectedResource.metadataProjectID() + if projectID == "" { return nil } + id := detectedResource.attrs.Metadata("instance/id") + zone := detectedResource.metadataZone() return &mrpb.MonitoredResource{ Type: "gce_instance", Labels: map[string]string{ - "project_id": projectID, - "instance_id": id, - "instance_name": name, - "zone": zone, + "project_id": projectID, + "instance_id": id, + "zone": zone, }, } } func detectResource() *mrpb.MonitoredResource { detectedResource.once.Do(func() { - switch { - // AppEngine, Functions, CloudRun, Kubernetes are detected first, - // as metadata.OnGCE() erroneously returns true on these runtimes. 
- case isAppEngine(): - detectedResource.pb = detectAppEngineResource() - case isCloudFunction(): - detectedResource.pb = detectCloudFunction() - case isCloudRun(): - detectedResource.pb = detectCloudRunResource() - case isKubernetesEngine(): - detectedResource.pb = detectKubernetesResource() - case metadata.OnGCE(): - detectedResource.pb = detectGCEResource() + if detectedResource.isMetadataActive() { + name := systemProductName() + switch { + case name == "Google App Engine", detectedResource.isAppEngine(): + detectedResource.pb = detectAppEngineResource() + case name == "Google Cloud Functions", detectedResource.isCloudFunction(): + detectedResource.pb = detectCloudFunction() + case name == "Google Cloud Run", detectedResource.isCloudRun(): + detectedResource.pb = detectCloudRunResource() + // cannot use name validation for GKE and GCE because + // both of them set product name to "Google Compute Engine" + case detectedResource.isKubernetesEngine(): + detectedResource.pb = detectKubernetesResource() + case detectedResource.isComputeEngine(): + detectedResource.pb = detectComputeEngineResource() + } } }) return detectedResource.pb } +// systemProductName reads resource type on the Linux-based environments such as +// Cloud Functions, Cloud Run, GKE, GCE, GAE, etc. +func systemProductName() string { + if runtime.GOOS != "linux" { + // We don't have any non-Linux clues available, at least yet. + return "" + } + slurp := detectedResource.attrs.ReadAll("/sys/class/dmi/id/product_name") + return strings.TrimSpace(slurp) +} + var resourceInfo = map[string]struct{ rtype, label string }{ "organizations": {"organization", "organization_id"}, "folders": {"folder", "folder_id"}, @@ -250,14 +273,6 @@ func monitoredResource(parent string) *mrpb.MonitoredResource { } } -func regionFromZone(zone string) string { - cutoff := strings.LastIndex(zone, "-") - if cutoff > 0 { - return zone[:cutoff] - } - return zone -} - func globalResource(projectID string) *mrpb.MonitoredResource { return &mrpb.MonitoredResource{ Type: "global", diff --git a/vendor/cloud.google.com/go/longrunning/CHANGES.md b/vendor/cloud.google.com/go/longrunning/CHANGES.md new file mode 100644 index 0000000000000..d4b44b09aca2e --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/CHANGES.md @@ -0,0 +1,26 @@ +# Changes + +## [0.4.1](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.4.0...longrunning/v0.4.1) (2023-02-14) + + +### Bug Fixes + +* **longrunning:** Properly parse errors with apierror ([#7392](https://github.com/googleapis/google-cloud-go/issues/7392)) ([e768e48](https://github.com/googleapis/google-cloud-go/commit/e768e487e10b197ba42a2339014136d066190610)) + +## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.3.0...longrunning/v0.4.0) (2023-01-04) + + +### Features + +* **longrunning:** Add REST client ([06a54a1](https://github.com/googleapis/google-cloud-go/commit/06a54a16a5866cce966547c51e203b9e09a25bc0)) + +## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/longrunning/v0.2.1...longrunning/v0.3.0) (2022-11-03) + + +### Features + +* **longrunning:** rewrite signatures in terms of new location ([3c4b2b3](https://github.com/googleapis/google-cloud-go/commit/3c4b2b34565795537aac1661e6af2442437e34ad)) + +## v0.1.0 + +Initial release. 
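The resource.go rewrite above replaces direct `metadata.*` and `os.Getenv` calls with the `ResourceAttributesGetter` abstraction and a product-name check, which makes the environment probing easier to reason about (and, presumably, to stub in tests). The following is a rough, illustrative sketch of how that classification behaves for the env-var-driven cases; it is not part of the vendored patch. The names `attrGetter`, `envOnlyGetter`, and `classify` are invented for the sketch, and the metadata-server checks used for GKE/GCE detection are omitted.

```go
// Illustrative sketch only — simplified mirror of the env-var checks in resource.go.
package main

import "fmt"

// attrGetter mirrors the method set of internal.ResourceAttributesGetter from the
// diff above. The real interface lives in an internal package, which Go does not
// allow importing from outside cloud.google.com/go/logging, so it is redeclared here.
type attrGetter interface {
	EnvVar(name string) string
	Metadata(path string) string
	ReadAll(path string) string
}

// envOnlyGetter answers environment-variable lookups from a map and returns empty
// strings for metadata and file reads, standing in for an environment without a
// reachable metadata server.
type envOnlyGetter struct{ env map[string]string }

func (g envOnlyGetter) EnvVar(name string) string   { return g.env[name] }
func (g envOnlyGetter) Metadata(path string) string { return "" }
func (g envOnlyGetter) ReadAll(path string) string  { return "" }

// classify follows the same reserved env vars and ordering as the isAppEngine,
// isCloudFunction and isCloudRun predicates above: App Engine first, then Cloud
// Functions, then Cloud Run. GKE and GCE need metadata queries, so they fall
// into the default case here.
func classify(attrs attrGetter) string {
	switch {
	case attrs.EnvVar("GAE_SERVICE") != "" && attrs.EnvVar("GAE_VERSION") != "" && attrs.EnvVar("GAE_INSTANCE") != "":
		return "gae_app"
	case attrs.EnvVar("FUNCTION_TARGET") != "" && attrs.EnvVar("FUNCTION_SIGNATURE_TYPE") != "" && attrs.EnvVar("K_SERVICE") != "":
		return "cloud_function"
	case attrs.EnvVar("K_CONFIGURATION") != "" && attrs.EnvVar("K_SERVICE") != "" && attrs.EnvVar("K_REVISION") != "":
		return "cloud_run_revision"
	default:
		return "k8s_container, gce_instance or unknown (requires metadata server)"
	}
}

func main() {
	attrs := envOnlyGetter{env: map[string]string{
		"K_CONFIGURATION": "my-config",
		"K_SERVICE":       "my-service",
		"K_REVISION":      "my-service-00001",
	}}
	fmt.Println(classify(attrs)) // prints "cloud_run_revision"
}
```

Note that K_SERVICE alone is ambiguous between Cloud Functions and Cloud Run, which is why the vendored code (and this sketch) checks the function-specific variables before the Cloud Run ones.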
diff --git a/vendor/cloud.google.com/go/longrunning/LICENSE b/vendor/cloud.google.com/go/longrunning/LICENSE new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cloud.google.com/go/longrunning/README.md b/vendor/cloud.google.com/go/longrunning/README.md new file mode 100644 index 0000000000000..a07f3093fd38a --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/README.md @@ -0,0 +1,26 @@ +# longrunning + +[![Go Reference](https://pkg.go.dev/badge/cloud.google.com/go/longrunning.svg)](https://pkg.go.dev/cloud.google.com/go/longrunning) + +A helper library for working with long running operations. + +## Install + +```bash +go get cloud.google.com/go/longrunning +``` + +## Go Version Support + +See the [Go Versions Supported](https://github.com/googleapis/google-cloud-go#go-versions-supported) +section in the root directory's README. + +## Contributing + +Contributions are welcome. Please, see the [CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md) +document for details. + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. See +[Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. diff --git a/vendor/cloud.google.com/go/longrunning/autogen/doc.go b/vendor/cloud.google.com/go/longrunning/autogen/doc.go new file mode 100644 index 0000000000000..58b74d96057ff --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/autogen/doc.go @@ -0,0 +1,199 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +// Package longrunning is an auto-generated package for the +// Long Running Operations API. +// +// NOTE: This package is in alpha. It is not stable, and is likely to change. +// +// # General documentation +// +// For information about setting deadlines, reusing contexts, and more +// please visit https://pkg.go.dev/cloud.google.com/go. +// +// # Example usage +// +// To get started with this package, create a client. +// +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := longrunning.NewOperationsClient(ctx) +// if err != nil { +// // TODO: Handle error. 
+// } +// defer c.Close() +// +// The client will use your default application credentials. Clients should be reused instead of created as needed. +// The methods of Client are safe for concurrent use by multiple goroutines. +// The returned client must be Closed when it is done being used. +// +// # Using the Client +// +// The following is an example of making an API call with the newly created client. +// +// ctx := context.Background() +// // This snippet has been automatically generated and should be regarded as a code template only. +// // It will require modifications to work: +// // - It may require correct/in-range values for request initialization. +// // - It may require specifying regional endpoints when creating the service client as shown in: +// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options +// c, err := longrunning.NewOperationsClient(ctx) +// if err != nil { +// // TODO: Handle error. +// } +// defer c.Close() +// +// req := &longrunningpb.ListOperationsRequest{ +// // TODO: Fill request struct fields. +// // See https://pkg.go.dev/cloud.google.com/go/longrunning/autogen/longrunningpb#ListOperationsRequest. +// } +// it := c.ListOperations(ctx, req) +// for { +// resp, err := it.Next() +// if err == iterator.Done { +// break +// } +// if err != nil { +// // TODO: Handle error. +// } +// // TODO: Use resp. +// _ = resp +// } +// +// # Use of Context +// +// The ctx passed to NewOperationsClient is used for authentication requests and +// for creating the underlying connection, but is not used for subsequent calls. +// Individual methods on the client use the ctx given to them. +// +// To close the open connection, use the Close() method. +package longrunning // import "cloud.google.com/go/longrunning/autogen" + +import ( + "context" + "fmt" + "net/http" + "os" + "runtime" + "strconv" + "strings" + "unicode" + + "google.golang.org/api/option" + "google.golang.org/grpc/metadata" +) + +// For more information on implementing a client constructor hook, see +// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors. +type clientHookParams struct{} +type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error) + +var versionClient string + +func getVersionClient() string { + if versionClient == "" { + return "UNKNOWN" + } + return versionClient +} + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return metadata.NewOutgoingContext(ctx, out) +} + +func checkDisableDeadlines() (bool, error) { + raw, ok := os.LookupEnv("GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE") + if !ok { + return false, nil + } + + b, err := strconv.ParseBool(raw) + return b, err +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "", + } +} + +// versionGo returns the Go runtime version. The returned string +// has no whitespace, suitable for reporting in header. 
+func versionGo() string { + const develPrefix = "devel +" + + s := runtime.Version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } + + notSemverRune := func(r rune) bool { + return !strings.ContainsRune("0123456789.", r) + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + s += "-" + prerelease + } + return s + } + return "UNKNOWN" +} + +// maybeUnknownEnum wraps the given proto-JSON parsing error if it is the result +// of receiving an unknown enum value. +func maybeUnknownEnum(err error) error { + if strings.Contains(err.Error(), "invalid value for enum type") { + err = fmt.Errorf("received an unknown enum value; a later version of the library may support it: %w", err) + } + return err +} + +// buildHeaders extracts metadata from the outgoing context, joins it with any other +// given metadata, and converts them into a http.Header. +func buildHeaders(ctx context.Context, mds ...metadata.MD) http.Header { + if cmd, ok := metadata.FromOutgoingContext(ctx); ok { + mds = append(mds, cmd) + } + md := metadata.Join(mds...) + return http.Header(md) +} diff --git a/vendor/cloud.google.com/go/longrunning/autogen/from_conn.go b/vendor/cloud.google.com/go/longrunning/autogen/from_conn.go new file mode 100644 index 0000000000000..f09714b9b32b8 --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/autogen/from_conn.go @@ -0,0 +1,30 @@ +// Copyright 2020, Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package longrunning + +import ( + "context" + + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +// InternalFromConn is for use by the Google Cloud Libraries only. +// +// Deprecated. Use `NewOperationsClient(ctx, option.WithGRPCConn(conn))` instead. 
+func InternalFromConn(conn *grpc.ClientConn) *OperationsClient { + c, _ := NewOperationsClient(context.Background(), option.WithGRPCConn(conn)) + return c +} diff --git a/vendor/cloud.google.com/go/longrunning/autogen/gapic_metadata.json b/vendor/cloud.google.com/go/longrunning/autogen/gapic_metadata.json new file mode 100644 index 0000000000000..5271428216654 --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/autogen/gapic_metadata.json @@ -0,0 +1,73 @@ +{ + "schema": "1.0", + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods.", + "language": "go", + "protoPackage": "google.longrunning", + "libraryPackage": "cloud.google.com/go/longrunning/autogen", + "services": { + "Operations": { + "clients": { + "grpc": { + "libraryClient": "OperationsClient", + "rpcs": { + "CancelOperation": { + "methods": [ + "CancelOperation" + ] + }, + "DeleteOperation": { + "methods": [ + "DeleteOperation" + ] + }, + "GetOperation": { + "methods": [ + "GetOperation" + ] + }, + "ListOperations": { + "methods": [ + "ListOperations" + ] + }, + "WaitOperation": { + "methods": [ + "WaitOperation" + ] + } + } + }, + "rest": { + "libraryClient": "OperationsClient", + "rpcs": { + "CancelOperation": { + "methods": [ + "CancelOperation" + ] + }, + "DeleteOperation": { + "methods": [ + "DeleteOperation" + ] + }, + "GetOperation": { + "methods": [ + "GetOperation" + ] + }, + "ListOperations": { + "methods": [ + "ListOperations" + ] + }, + "WaitOperation": { + "methods": [ + "WaitOperation" + ] + } + } + } + } + } + } +} diff --git a/vendor/cloud.google.com/go/longrunning/autogen/info.go b/vendor/cloud.google.com/go/longrunning/autogen/info.go new file mode 100644 index 0000000000000..b006c4d018e42 --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/autogen/info.go @@ -0,0 +1,24 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package longrunning + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Also passes any +// provided key-value pairs. Intended for use by Google-written clients. +// +// Internal use only. +func (c *OperationsClient) SetGoogleClientInfo(keyval ...string) { + c.setGoogleClientInfo(keyval...) +} diff --git a/vendor/google.golang.org/genproto/googleapis/longrunning/operations.pb.go b/vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go similarity index 99% rename from vendor/google.golang.org/genproto/googleapis/longrunning/operations.pb.go rename to vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go index 44b30dca5490f..18f714760494f 100644 --- a/vendor/google.golang.org/genproto/googleapis/longrunning/operations.pb.go +++ b/vendor/cloud.google.com/go/longrunning/autogen/longrunningpb/operations.pb.go @@ -15,10 +15,10 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.12 // source: google/longrunning/operations.proto -package longrunning +package longrunningpb import ( context "context" @@ -70,6 +70,7 @@ type Operation struct { // If `done` == `true`, exactly one of `error` or `response` is set. // // Types that are assignable to Result: + // // *Operation_Error // *Operation_Response Result isOperation_Result `protobuf_oneof:"result"` @@ -519,13 +520,13 @@ func (x *WaitOperationRequest) GetTimeout() *durationpb.Duration { // // Example: // -// rpc LongRunningRecognize(LongRunningRecognizeRequest) -// returns (google.longrunning.Operation) { -// option (google.longrunning.operation_info) = { -// response_type: "LongRunningRecognizeResponse" -// metadata_type: "LongRunningRecognizeMetadata" -// }; -// } +// rpc LongRunningRecognize(LongRunningRecognizeRequest) +// returns (google.longrunning.Operation) { +// option (google.longrunning.operation_info) = { +// response_type: "LongRunningRecognizeResponse" +// metadata_type: "LongRunningRecognizeMetadata" +// }; +// } type OperationInfo struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go b/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go new file mode 100644 index 0000000000000..3fa60d3010577 --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go @@ -0,0 +1,917 @@ +// Copyright 2023 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package longrunning + +import ( + "bytes" + "context" + "fmt" + "io/ioutil" + "math" + "net/http" + "net/url" + "time" + + longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/googleapi" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" + gtransport "google.golang.org/api/transport/grpc" + httptransport "google.golang.org/api/transport/http" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" +) + +var newOperationsClientHook clientHook + +// OperationsCallOptions contains the retry settings for each method of OperationsClient. 
+type OperationsCallOptions struct { + ListOperations []gax.CallOption + GetOperation []gax.CallOption + DeleteOperation []gax.CallOption + CancelOperation []gax.CallOption + WaitOperation []gax.CallOption +} + +func defaultOperationsGRPCClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("longrunning.googleapis.com:443"), + internaloption.WithDefaultMTLSEndpoint("longrunning.mtls.googleapis.com:443"), + internaloption.WithDefaultAudience("https://longrunning.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + internaloption.EnableJwtWithScope(), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultOperationsCallOptions() *OperationsCallOptions { + return &OperationsCallOptions{ + ListOperations: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 500 * time.Millisecond, + Max: 10000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + GetOperation: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 500 * time.Millisecond, + Max: 10000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + DeleteOperation: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 500 * time.Millisecond, + Max: 10000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + CancelOperation: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 500 * time.Millisecond, + Max: 10000 * time.Millisecond, + Multiplier: 2.00, + }) + }), + }, + WaitOperation: []gax.CallOption{}, + } +} + +func defaultOperationsRESTCallOptions() *OperationsCallOptions { + return &OperationsCallOptions{ + ListOperations: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 500 * time.Millisecond, + Max: 10000 * time.Millisecond, + Multiplier: 2.00, + }, + http.StatusServiceUnavailable) + }), + }, + GetOperation: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 500 * time.Millisecond, + Max: 10000 * time.Millisecond, + Multiplier: 2.00, + }, + http.StatusServiceUnavailable) + }), + }, + DeleteOperation: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 500 * time.Millisecond, + Max: 10000 * time.Millisecond, + Multiplier: 2.00, + }, + http.StatusServiceUnavailable) + }), + }, + CancelOperation: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnHTTPCodes(gax.Backoff{ + Initial: 500 * time.Millisecond, + Max: 10000 * time.Millisecond, + Multiplier: 2.00, + }, + http.StatusServiceUnavailable) + }), + }, + WaitOperation: []gax.CallOption{}, + } +} + +// internalOperationsClient is an interface that defines the methods available from Long Running Operations API. 
+type internalOperationsClient interface { + Close() error + setGoogleClientInfo(...string) + Connection() *grpc.ClientConn + ListOperations(context.Context, *longrunningpb.ListOperationsRequest, ...gax.CallOption) *OperationIterator + GetOperation(context.Context, *longrunningpb.GetOperationRequest, ...gax.CallOption) (*longrunningpb.Operation, error) + DeleteOperation(context.Context, *longrunningpb.DeleteOperationRequest, ...gax.CallOption) error + CancelOperation(context.Context, *longrunningpb.CancelOperationRequest, ...gax.CallOption) error + WaitOperation(context.Context, *longrunningpb.WaitOperationRequest, ...gax.CallOption) (*longrunningpb.Operation, error) +} + +// OperationsClient is a client for interacting with Long Running Operations API. +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +// +// Manages long-running operations with an API service. +// +// When an API method normally takes long time to complete, it can be designed +// to return Operation to the client, and the client can use this +// interface to receive the real response asynchronously by polling the +// operation resource, or pass the operation resource to another API (such as +// Google Cloud Pub/Sub API) to receive the response. Any API service that +// returns long-running operations should implement the Operations interface +// so developers can have a consistent client experience. +type OperationsClient struct { + // The internal transport-dependent client. + internalClient internalOperationsClient + + // The call options for this service. + CallOptions *OperationsCallOptions +} + +// Wrapper methods routed to the internal client. + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *OperationsClient) Close() error { + return c.internalClient.Close() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *OperationsClient) setGoogleClientInfo(keyval ...string) { + c.internalClient.setGoogleClientInfo(keyval...) +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *OperationsClient) Connection() *grpc.ClientConn { + return c.internalClient.Connection() +} + +// ListOperations lists operations that match the specified filter in the request. If the +// server doesn’t support this method, it returns UNIMPLEMENTED. +// +// NOTE: the name binding allows API services to override the binding +// to use different resource name schemes, such as users/*/operations. To +// override the binding, API services can add a binding such as +// "/v1/{name=users/*}/operations" to their service configuration. +// For backwards compatibility, the default name includes the operations +// collection id, however overriding users must ensure the name binding +// is the parent resource, without the operations collection id. +func (c *OperationsClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator { + return c.internalClient.ListOperations(ctx, req, opts...) +} + +// GetOperation gets the latest state of a long-running operation. Clients can use this +// method to poll the operation result at intervals as recommended by the API +// service. 
+func (c *OperationsClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + return c.internalClient.GetOperation(ctx, req, opts...) +} + +// DeleteOperation deletes a long-running operation. This method indicates that the client is +// no longer interested in the operation result. It does not cancel the +// operation. If the server doesn’t support this method, it returns +// google.rpc.Code.UNIMPLEMENTED. +func (c *OperationsClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error { + return c.internalClient.DeleteOperation(ctx, req, opts...) +} + +// CancelOperation starts asynchronous cancellation on a long-running operation. The server +// makes a best effort to cancel the operation, but success is not +// guaranteed. If the server doesn’t support this method, it returns +// google.rpc.Code.UNIMPLEMENTED. Clients can use +// Operations.GetOperation or +// other methods to check whether the cancellation succeeded or whether the +// operation completed despite cancellation. On successful cancellation, +// the operation is not deleted; instead, it becomes an operation with +// an Operation.error value with a google.rpc.Status.code of 1, +// corresponding to Code.CANCELLED. +func (c *OperationsClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error { + return c.internalClient.CancelOperation(ctx, req, opts...) +} + +// WaitOperation waits until the specified long-running operation is done or reaches at most +// a specified timeout, returning the latest state. If the operation is +// already done, the latest state is immediately returned. If the timeout +// specified is greater than the default HTTP/RPC timeout, the HTTP/RPC +// timeout is used. If the server does not support this method, it returns +// google.rpc.Code.UNIMPLEMENTED. +// Note that this method is on a best-effort basis. It may return the latest +// state before the specified timeout (including immediately), meaning even an +// immediate response is no guarantee that the operation is done. +func (c *OperationsClient) WaitOperation(ctx context.Context, req *longrunningpb.WaitOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + return c.internalClient.WaitOperation(ctx, req, opts...) +} + +// operationsGRPCClient is a client for interacting with Long Running Operations API over gRPC transport. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type operationsGRPCClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // flag to opt out of default deadlines via GOOGLE_API_GO_EXPERIMENTAL_DISABLE_DEFAULT_DEADLINE + disableDeadlines bool + + // Points back to the CallOptions field of the containing OperationsClient + CallOptions **OperationsCallOptions + + // The gRPC API client. + operationsClient longrunningpb.OperationsClient + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewOperationsClient creates a new operations client based on gRPC. +// The returned client must be Closed when it is done being used to clean up its underlying connections. +// +// Manages long-running operations with an API service. 
+// +// When an API method normally takes long time to complete, it can be designed +// to return Operation to the client, and the client can use this +// interface to receive the real response asynchronously by polling the +// operation resource, or pass the operation resource to another API (such as +// Google Cloud Pub/Sub API) to receive the response. Any API service that +// returns long-running operations should implement the Operations interface +// so developers can have a consistent client experience. +func NewOperationsClient(ctx context.Context, opts ...option.ClientOption) (*OperationsClient, error) { + clientOpts := defaultOperationsGRPCClientOptions() + if newOperationsClientHook != nil { + hookOpts, err := newOperationsClientHook(ctx, clientHookParams{}) + if err != nil { + return nil, err + } + clientOpts = append(clientOpts, hookOpts...) + } + + disableDeadlines, err := checkDisableDeadlines() + if err != nil { + return nil, err + } + + connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...) + if err != nil { + return nil, err + } + client := OperationsClient{CallOptions: defaultOperationsCallOptions()} + + c := &operationsGRPCClient{ + connPool: connPool, + disableDeadlines: disableDeadlines, + operationsClient: longrunningpb.NewOperationsClient(connPool), + CallOptions: &client.CallOptions, + } + c.setGoogleClientInfo() + + client.internalClient = c + + return &client, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: Connections are now pooled so this method does not always +// return the same resource. +func (c *operationsGRPCClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *operationsGRPCClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", versionGo()}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *operationsGRPCClient) Close() error { + return c.connPool.Close() +} + +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type operationsRESTClient struct { + // The http endpoint to connect to. + endpoint string + + // The http client. + httpClient *http.Client + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD + + // Points back to the CallOptions field of the containing OperationsClient + CallOptions **OperationsCallOptions +} + +// NewOperationsRESTClient creates a new operations rest client. +// +// Manages long-running operations with an API service. +// +// When an API method normally takes long time to complete, it can be designed +// to return Operation to the client, and the client can use this +// interface to receive the real response asynchronously by polling the +// operation resource, or pass the operation resource to another API (such as +// Google Cloud Pub/Sub API) to receive the response. Any API service that +// returns long-running operations should implement the Operations interface +// so developers can have a consistent client experience. 
+func NewOperationsRESTClient(ctx context.Context, opts ...option.ClientOption) (*OperationsClient, error) { + clientOpts := append(defaultOperationsRESTClientOptions(), opts...) + httpClient, endpoint, err := httptransport.NewClient(ctx, clientOpts...) + if err != nil { + return nil, err + } + + callOpts := defaultOperationsRESTCallOptions() + c := &operationsRESTClient{ + endpoint: endpoint, + httpClient: httpClient, + CallOptions: &callOpts, + } + c.setGoogleClientInfo() + + return &OperationsClient{internalClient: c, CallOptions: callOpts}, nil +} + +func defaultOperationsRESTClientOptions() []option.ClientOption { + return []option.ClientOption{ + internaloption.WithDefaultEndpoint("https://longrunning.googleapis.com"), + internaloption.WithDefaultMTLSEndpoint("https://longrunning.mtls.googleapis.com"), + internaloption.WithDefaultAudience("https://longrunning.googleapis.com/"), + internaloption.WithDefaultScopes(DefaultAuthScopes()...), + } +} + +// setGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *operationsRESTClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", versionGo()}, keyval...) + kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "rest", "UNKNOWN") + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *operationsRESTClient) Close() error { + // Replace httpClient with nil to force cleanup. + c.httpClient = nil + return nil +} + +// Connection returns a connection to the API service. +// +// Deprecated: This method always returns nil. +func (c *operationsRESTClient) Connection() *grpc.ClientConn { + return nil +} +func (c *operationsGRPCClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).ListOperations[0:len((*c.CallOptions).ListOperations):len((*c.CallOptions).ListOperations)], opts...) + it := &OperationIterator{} + req = proto.Clone(req).(*longrunningpb.ListOperationsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) { + resp := &longrunningpb.ListOperationsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.operationsClient.ListOperations(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.GetOperations(), resp.GetNextPageToken(), nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) 
+ return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +func (c *operationsGRPCClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 10000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.operationsClient.GetOperation(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *operationsGRPCClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 10000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).DeleteOperation[0:len((*c.CallOptions).DeleteOperation):len((*c.CallOptions).DeleteOperation)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.operationsClient.DeleteOperation(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *operationsGRPCClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error { + if _, ok := ctx.Deadline(); !ok && !c.disableDeadlines { + cctx, cancel := context.WithTimeout(ctx, 10000*time.Millisecond) + defer cancel() + ctx = cctx + } + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append((*c.CallOptions).CancelOperation[0:len((*c.CallOptions).CancelOperation):len((*c.CallOptions).CancelOperation)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.operationsClient.CancelOperation(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +func (c *operationsGRPCClient) WaitOperation(ctx context.Context, req *longrunningpb.WaitOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append((*c.CallOptions).WaitOperation[0:len((*c.CallOptions).WaitOperation):len((*c.CallOptions).WaitOperation)], opts...) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.operationsClient.WaitOperation(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListOperations lists operations that match the specified filter in the request. 
If the +// server doesn’t support this method, it returns UNIMPLEMENTED. +// +// NOTE: the name binding allows API services to override the binding +// to use different resource name schemes, such as users/*/operations. To +// override the binding, API services can add a binding such as +// "/v1/{name=users/*}/operations" to their service configuration. +// For backwards compatibility, the default name includes the operations +// collection id, however overriding users must ensure the name binding +// is the parent resource, without the operations collection id. +func (c *operationsRESTClient) ListOperations(ctx context.Context, req *longrunningpb.ListOperationsRequest, opts ...gax.CallOption) *OperationIterator { + it := &OperationIterator{} + req = proto.Clone(req).(*longrunningpb.ListOperationsRequest) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + it.InternalFetch = func(pageSize int, pageToken string) ([]*longrunningpb.Operation, string, error) { + resp := &longrunningpb.ListOperationsResponse{} + if pageToken != "" { + req.PageToken = pageToken + } + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else if pageSize != 0 { + req.PageSize = int32(pageSize) + } + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, "", err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + params := url.Values{} + if req.GetFilter() != "" { + params.Add("filter", fmt.Sprintf("%v", req.GetFilter())) + } + if req.GetPageSize() != 0 { + params.Add("pageSize", fmt.Sprintf("%v", req.GetPageSize())) + } + if req.GetPageToken() != "" { + params.Add("pageToken", fmt.Sprintf("%v", req.GetPageToken())) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + headers := buildHeaders(ctx, c.xGoogMetadata, metadata.Pairs("Content-Type", "application/json")) + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := ioutil.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return maybeUnknownEnum(err) + } + + return nil + }, opts...) + if e != nil { + return nil, "", e + } + it.Response = resp + return resp.GetOperations(), resp.GetNextPageToken(), nil + } + + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.GetPageSize()) + it.pageInfo.Token = req.GetPageToken() + + return it +} + +// GetOperation gets the latest state of a long-running operation. Clients can use this +// method to poll the operation result at intervals as recommended by the API +// service. 
+func (c *operationsRESTClient) GetOperation(ctx context.Context, req *longrunningpb.GetOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + // Build HTTP headers from client and context metadata. + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + headers := buildHeaders(ctx, c.xGoogMetadata, md, metadata.Pairs("Content-Type", "application/json")) + opts = append((*c.CallOptions).GetOperation[0:len((*c.CallOptions).GetOperation):len((*c.CallOptions).GetOperation)], opts...) + unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &longrunningpb.Operation{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("GET", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := ioutil.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return maybeUnknownEnum(err) + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// DeleteOperation deletes a long-running operation. This method indicates that the client is +// no longer interested in the operation result. It does not cancel the +// operation. If the server doesn’t support this method, it returns +// google.rpc.Code.UNIMPLEMENTED. +func (c *operationsRESTClient) DeleteOperation(ctx context.Context, req *longrunningpb.DeleteOperationRequest, opts ...gax.CallOption) error { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return err + } + baseUrl.Path += fmt.Sprintf("/v1/%v", req.GetName()) + + // Build HTTP headers from client and context metadata. + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + headers := buildHeaders(ctx, c.xGoogMetadata, md, metadata.Pairs("Content-Type", "application/json")) + return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("DELETE", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + // Returns nil if there is no error, otherwise wraps + // the response code and body into a non-nil error + return googleapi.CheckResponse(httpRsp) + }, opts...) +} + +// CancelOperation starts asynchronous cancellation on a long-running operation. The server +// makes a best effort to cancel the operation, but success is not +// guaranteed. If the server doesn’t support this method, it returns +// google.rpc.Code.UNIMPLEMENTED. Clients can use +// Operations.GetOperation or +// other methods to check whether the cancellation succeeded or whether the +// operation completed despite cancellation. 
On successful cancellation, +// the operation is not deleted; instead, it becomes an operation with +// an Operation.error value with a google.rpc.Status.code of 1, +// corresponding to Code.CANCELLED. +func (c *operationsRESTClient) CancelOperation(ctx context.Context, req *longrunningpb.CancelOperationRequest, opts ...gax.CallOption) error { + m := protojson.MarshalOptions{AllowPartial: true, UseEnumNumbers: true} + jsonReq, err := m.Marshal(req) + if err != nil { + return err + } + + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return err + } + baseUrl.Path += fmt.Sprintf("/v1/%v:cancel", req.GetName()) + + // Build HTTP headers from client and context metadata. + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + + headers := buildHeaders(ctx, c.xGoogMetadata, md, metadata.Pairs("Content-Type", "application/json")) + return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("POST", baseUrl.String(), bytes.NewReader(jsonReq)) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + // Returns nil if there is no error, otherwise wraps + // the response code and body into a non-nil error + return googleapi.CheckResponse(httpRsp) + }, opts...) +} + +// WaitOperation waits until the specified long-running operation is done or reaches at most +// a specified timeout, returning the latest state. If the operation is +// already done, the latest state is immediately returned. If the timeout +// specified is greater than the default HTTP/RPC timeout, the HTTP/RPC +// timeout is used. If the server does not support this method, it returns +// google.rpc.Code.UNIMPLEMENTED. +// Note that this method is on a best-effort basis. It may return the latest +// state before the specified timeout (including immediately), meaning even an +// immediate response is no guarantee that the operation is done. +func (c *operationsRESTClient) WaitOperation(ctx context.Context, req *longrunningpb.WaitOperationRequest, opts ...gax.CallOption) (*longrunningpb.Operation, error) { + baseUrl, err := url.Parse(c.endpoint) + if err != nil { + return nil, err + } + baseUrl.Path += fmt.Sprintf("") + + params := url.Values{} + if req.GetName() != "" { + params.Add("name", fmt.Sprintf("%v", req.GetName())) + } + if req.GetTimeout() != nil { + timeout, err := protojson.Marshal(req.GetTimeout()) + if err != nil { + return nil, err + } + params.Add("timeout", string(timeout)) + } + + baseUrl.RawQuery = params.Encode() + + // Build HTTP headers from client and context metadata. + headers := buildHeaders(ctx, c.xGoogMetadata, metadata.Pairs("Content-Type", "application/json")) + opts = append((*c.CallOptions).WaitOperation[0:len((*c.CallOptions).WaitOperation):len((*c.CallOptions).WaitOperation)], opts...) 
+ unm := protojson.UnmarshalOptions{AllowPartial: true, DiscardUnknown: true} + resp := &longrunningpb.Operation{} + e := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + if settings.Path != "" { + baseUrl.Path = settings.Path + } + httpReq, err := http.NewRequest("", baseUrl.String(), nil) + if err != nil { + return err + } + httpReq = httpReq.WithContext(ctx) + httpReq.Header = headers + + httpRsp, err := c.httpClient.Do(httpReq) + if err != nil { + return err + } + defer httpRsp.Body.Close() + + if err = googleapi.CheckResponse(httpRsp); err != nil { + return err + } + + buf, err := ioutil.ReadAll(httpRsp.Body) + if err != nil { + return err + } + + if err := unm.Unmarshal(buf, resp); err != nil { + return maybeUnknownEnum(err) + } + + return nil + }, opts...) + if e != nil { + return nil, e + } + return resp, nil +} + +// OperationIterator manages a stream of *longrunningpb.Operation. +type OperationIterator struct { + items []*longrunningpb.Operation + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*longrunningpb.Operation, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *OperationIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *OperationIterator) Next() (*longrunningpb.Operation, error) { + var item *longrunningpb.Operation + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *OperationIterator) bufLen() int { + return len(it.items) +} + +func (it *OperationIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/longrunning/longrunning.go b/vendor/cloud.google.com/go/longrunning/longrunning.go new file mode 100644 index 0000000000000..40186e29fb913 --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/longrunning.go @@ -0,0 +1,179 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package longrunning supports Long Running Operations for the Google Cloud Libraries. +// See google.golang.org/genproto/googleapis/longrunning for its service definition. 
+// +// Users of the Google Cloud Libraries will typically not use this package directly. +// Instead they will call functions returning Operations and call their methods. +// +// This package is still experimental and subject to change. +package longrunning // import "cloud.google.com/go/longrunning" + +import ( + "context" + "errors" + "fmt" + "time" + + autogen "cloud.google.com/go/longrunning/autogen" + pb "cloud.google.com/go/longrunning/autogen/longrunningpb" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + gax "github.com/googleapis/gax-go/v2" + "github.com/googleapis/gax-go/v2/apierror" + "google.golang.org/grpc/status" +) + +// ErrNoMetadata is the error returned by Metadata if the operation contains no metadata. +var ErrNoMetadata = errors.New("operation contains no metadata") + +// Operation represents the result of an API call that may not be ready yet. +type Operation struct { + c operationsClient + proto *pb.Operation +} + +type operationsClient interface { + GetOperation(context.Context, *pb.GetOperationRequest, ...gax.CallOption) (*pb.Operation, error) + CancelOperation(context.Context, *pb.CancelOperationRequest, ...gax.CallOption) error + DeleteOperation(context.Context, *pb.DeleteOperationRequest, ...gax.CallOption) error +} + +// InternalNewOperation is for use by the google Cloud Libraries only. +// +// InternalNewOperation returns an long-running operation, abstracting the raw pb.Operation. +// The conn parameter refers to a server that proto was received from. +func InternalNewOperation(inner *autogen.OperationsClient, proto *pb.Operation) *Operation { + return &Operation{ + c: inner, + proto: proto, + } +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service +// from which the operation is created. +func (op *Operation) Name() string { + return op.proto.Name +} + +// Done reports whether the long-running operation has completed. +func (op *Operation) Done() bool { + return op.proto.Done +} + +// Metadata unmarshals op's metadata into meta. +// If op does not contain any metadata, Metadata returns ErrNoMetadata and meta is unmodified. +func (op *Operation) Metadata(meta proto.Message) error { + if m := op.proto.Metadata; m != nil { + return ptypes.UnmarshalAny(m, meta) + } + return ErrNoMetadata +} + +// Poll fetches the latest state of a long-running operation. +// +// If Poll fails, the error is returned and op is unmodified. +// If Poll succeeds and the operation has completed with failure, +// the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true; if resp != nil, the response of the operation +// is stored in resp. +func (op *Operation) Poll(ctx context.Context, resp proto.Message, opts ...gax.CallOption) error { + if !op.Done() { + p, err := op.c.GetOperation(ctx, &pb.GetOperationRequest{Name: op.Name()}, opts...) + if err != nil { + return err + } + op.proto = p + } + if !op.Done() { + return nil + } + + switch r := op.proto.Result.(type) { + case *pb.Operation_Error: + err, _ := apierror.FromError(status.ErrorProto(r.Error)) + return err + case *pb.Operation_Response: + if resp == nil { + return nil + } + return ptypes.UnmarshalAny(r.Response, resp) + default: + return fmt.Errorf("unsupported result type %[1]T: %[1]v", r) + } +} + +// DefaultWaitInterval is the polling interval used by Operation.Wait. 
+const DefaultWaitInterval = 60 * time.Second + +// Wait is equivalent to WaitWithInterval using DefaultWaitInterval. +func (op *Operation) Wait(ctx context.Context, resp proto.Message, opts ...gax.CallOption) error { + return op.WaitWithInterval(ctx, resp, DefaultWaitInterval, opts...) +} + +// WaitWithInterval blocks until the operation is completed. +// If resp != nil, Wait stores the response in resp. +// WaitWithInterval polls every interval, except initially +// when it polls using exponential backoff. +// +// See documentation of Poll for error-handling information. +func (op *Operation) WaitWithInterval(ctx context.Context, resp proto.Message, interval time.Duration, opts ...gax.CallOption) error { + bo := gax.Backoff{ + Initial: 1 * time.Second, + Max: interval, + } + if bo.Max < bo.Initial { + bo.Max = bo.Initial + } + return op.wait(ctx, resp, &bo, gax.Sleep, opts...) +} + +type sleeper func(context.Context, time.Duration) error + +// wait implements Wait, taking exponentialBackoff and sleeper arguments for testing. +func (op *Operation) wait(ctx context.Context, resp proto.Message, bo *gax.Backoff, sl sleeper, opts ...gax.CallOption) error { + for { + if err := op.Poll(ctx, resp, opts...); err != nil { + return err + } + if op.Done() { + return nil + } + if err := sl(ctx, bo.Pause()); err != nil { + return err + } + } +} + +// Cancel starts asynchronous cancellation on a long-running operation. The server +// makes a best effort to cancel the operation, but success is not +// guaranteed. If the server doesn't support this method, it returns +// status.Code(err) == codes.Unimplemented. Clients can use +// Poll or other methods to check whether the cancellation succeeded or whether the +// operation completed despite cancellation. On successful cancellation, +// the operation is not deleted; instead, op.Poll returns an error +// with code Canceled. +func (op *Operation) Cancel(ctx context.Context, opts ...gax.CallOption) error { + return op.c.CancelOperation(ctx, &pb.CancelOperationRequest{Name: op.Name()}, opts...) +} + +// Delete deletes a long-running operation. This method indicates that the client is +// no longer interested in the operation result. It does not cancel the +// operation. If the server doesn't support this method, status.Code(err) == codes.Unimplemented. +func (op *Operation) Delete(ctx context.Context, opts ...gax.CallOption) error { + return op.c.DeleteOperation(ctx, &pb.DeleteOperationRequest{Name: op.Name()}, opts...) +} diff --git a/vendor/cloud.google.com/go/logging/go_mod_tidy_hack.go b/vendor/cloud.google.com/go/longrunning/tidyfix.go similarity index 84% rename from vendor/cloud.google.com/go/logging/go_mod_tidy_hack.go rename to vendor/cloud.google.com/go/longrunning/tidyfix.go index a932c70c41e0e..d9a07f99e0da4 100644 --- a/vendor/cloud.google.com/go/logging/go_mod_tidy_hack.go +++ b/vendor/cloud.google.com/go/longrunning/tidyfix.go @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,11 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -// This file, and the cloud.google.com/go import, won't actually become part of +// This file, and the {{.RootMod}} import, won't actually become part of // the resultant binary. 
+//go:build modhack // +build modhack -package logging +package longrunning // Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository import _ "cloud.google.com/go" diff --git a/vendor/cloud.google.com/go/migration.md b/vendor/cloud.google.com/go/migration.md new file mode 100644 index 0000000000000..224dcfa139727 --- /dev/null +++ b/vendor/cloud.google.com/go/migration.md @@ -0,0 +1,50 @@ +# go-genproto to google-cloud-go message type migration + +The message types for all of our client libraries are being migrated from the +`google.golang.org/genproto` [module](https://pkg.go.dev/google.golang.org/genproto) +to their respective product specific module in this repository. For example +this asset request type that was once found in [genproto](https://pkg.go.dev/google.golang.org/genproto@v0.0.0-20220908141613-51c1cc9bc6d0/googleapis/cloud/asset/v1p5beta1#ListAssetsRequest) +can now be found in directly in the [asset module](https://pkg.go.dev/cloud.google.com/go/asset/apiv1p5beta1/assetpb#ListAssetsRequest). + +Although the type definitions have moved, aliases have been left in the old +genproto packages to ensure a smooth non-breaking transition. + +## How do I migrate to the new packages? + +The easiest option is to run a migration tool at the root of our project. It is +like `go fix`, but specifically for this migration. Before running the tool it +is best to make sure any modules that have the prefix of `cloud.google.com/go` +are up to date. To run the tool, do the following: + +```bash +go run cloud.google.com/go/internal/aliasfix/cmd/aliasfix@latest . +go mod tidy +``` + +The tool should only change up to one line in the import statement per file. +This can also be done by hand if you prefer. + +## Do I have to migrate? + +Yes if you wish to keep using the newest versions of our client libraries with +the newest features -- You should migrate by the start of 2023. Until then we +will keep updating the aliases in go-genproto weekly. If you have an existing +workload that uses these client libraries and does not need to update its +dependencies there is no action to take. All existing written code will continue +to work. + +## Why are these types being moved + +1. This change will help simplify dependency trees over time. +2. The types will now be in product specific modules that are versioned + independently with semver. This is especially a benefit for users that rely + on multiple clients in a single application. Because message types are no + longer mono-packaged users are less likely to run into intermediate + dependency conflicts when updating dependencies. +3. Having all these types in one repository will help us ensure that unintended + changes are caught before they would be released. + +## Have questions? + +Please reach out to us on our [issue tracker](https://github.com/googleapis/google-cloud-go/issues/new?assignees=&labels=genproto-migration&template=migration-issue.md&title=package%3A+migration+help) +if you have any questions or concerns. 
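As a minimal sketch of the rewrite described in migration.md above (and performed by this diff for the long-running operations types, which move from google.golang.org/genproto/googleapis/longrunning to cloud.google.com/go/longrunning/autogen/longrunningpb), a hypothetical consumer only needs its import path updated; the type names themselves are unchanged:

```go
// Hypothetical consumer package, assumed purely for illustration; only the
// import path differs before and after running the aliasfix tool.
package example

import (
	// Before the migration the message types came from the genproto mono-module:
	//   longrunningpb "google.golang.org/genproto/googleapis/longrunning"
	// After the migration they come from the product-specific module vendored above:
	longrunningpb "cloud.google.com/go/longrunning/autogen/longrunningpb"
)

// The request type is identical in both locations, so existing code keeps compiling.
var listReq = &longrunningpb.ListOperationsRequest{
	Name:     "operations", // hypothetical resource name
	PageSize: 50,
}
```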
diff --git a/vendor/cloud.google.com/go/release-please-config-individual.json b/vendor/cloud.google.com/go/release-please-config-individual.json new file mode 100644 index 0000000000000..b8766e0f2ea78 --- /dev/null +++ b/vendor/cloud.google.com/go/release-please-config-individual.json @@ -0,0 +1,45 @@ +{ + "$schema": "https://raw.githubusercontent.com/googleapis/release-please/main/schemas/config.json", + "release-type": "go-yoshi", + "include-component-in-tag": true, + "separate-pull-requests": true, + "tag-separator": "/", + "packages": { + "bigquery": { + "component": "bigquery" + }, + "bigtable": { + "component": "bigtable" + }, + "datastore": { + "component": "datastore" + }, + "errorreporting": { + "component": "errorreporting" + }, + "firestore": { + "component": "firestore" + }, + "logging": { + "component": "logging" + }, + "profiler": { + "component": "profiler" + }, + "pubsub": { + "component": "pubsub" + }, + "pubsublite": { + "component": "pubsublite" + }, + "spanner": { + "component": "spanner" + }, + "storage": { + "component": "storage" + } + }, + "plugins": [ + "sentence-case" + ] +} \ No newline at end of file diff --git a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json index 885710c975b75..7d1d51f0eadd0 100644 --- a/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json +++ b/vendor/cloud.google.com/go/release-please-config-yoshi-submodules.json @@ -21,6 +21,12 @@ "apigeeconnect": { "component": "apigeeconnect" }, + "apigeeregistry": { + "component": "apigeeregistry" + }, + "apikeys": { + "component": "apikeys" + }, "appengine": { "component": "appengine" }, @@ -45,6 +51,9 @@ "batch": { "component": "batch" }, + "beyondcorp": { + "component": "beyondcorp" + }, "billing": { "component": "billing" }, @@ -69,6 +78,9 @@ "compute": { "component": "compute" }, + "compute/metadata": { + "component": "compute/metadata" + }, "contactcenterinsights": { "component": "contactcenterinsights" }, @@ -84,6 +96,9 @@ "dataflow": { "component": "dataflow" }, + "dataform": { + "component": "dataform" + }, "datafusion": { "component": "datafusion" }, @@ -117,6 +132,9 @@ "domains": { "component": "domains" }, + "edgecontainer": { + "component": "edgecontainer" + }, "essentialcontacts": { "component": "essentialcontacts" }, @@ -171,9 +189,15 @@ "lifesciences": { "component": "lifesciences" }, + "longrunning": { + "component": "longrunning" + }, "managedidentities": { "component": "managedidentities" }, + "maps": { + "component": "maps" + }, "mediatranslation": { "component": "mediatranslation" }, @@ -306,6 +330,9 @@ "vmmigration": { "component": "vmmigration" }, + "vmwareengine": { + "component": "vmwareengine" + }, "vpcaccess": { "component": "vpcaccess" }, @@ -318,5 +345,6 @@ "workflows": { "component": "workflows" } - } + }, + "plugins": ["sentence-case"] } diff --git a/vendor/cloud.google.com/go/release-please-config.json b/vendor/cloud.google.com/go/release-please-config.json index 546e7c31ad590..1400245b8a3be 100644 --- a/vendor/cloud.google.com/go/release-please-config.json +++ b/vendor/cloud.google.com/go/release-please-config.json @@ -6,5 +6,6 @@ ".": { "component": "main" } - } + }, + "plugins": ["sentence-case"] } diff --git a/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go b/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go index 16200a100e1ed..fdde08a75c785 100644 --- a/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go +++ 
b/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go @@ -1,3 +1,17 @@ +// Copyright 2023 The go-fuzz-headers Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package gofuzzheaders import ( @@ -18,7 +32,10 @@ import ( securejoin "github.com/cyphar/filepath-securejoin" ) -var MaxTotalLen uint32 = 2000000 +var ( + MaxTotalLen uint32 = 2000000 + maxDepth = 100 +) func SetMaxTotalLen(newLen uint32) { MaxTotalLen = newLen @@ -32,6 +49,7 @@ type ConsumeFuzzer struct { NumberOfCalls int position uint32 fuzzUnexportedFields bool + curDepth int Funcs map[reflect.Type]reflect.Value } @@ -44,6 +62,7 @@ func NewConsumer(fuzzData []byte) *ConsumeFuzzer { data: fuzzData, dataTotal: uint32(len(fuzzData)), Funcs: make(map[reflect.Type]reflect.Value), + curDepth: 0, } } @@ -129,11 +148,18 @@ func (f *ConsumeFuzzer) setCustom(v reflect.Value) error { } func (f *ConsumeFuzzer) fuzzStruct(e reflect.Value, customFunctions bool) error { + if f.curDepth >= maxDepth { + // return err or nil here? + return nil + } + f.curDepth++ + defer func() { f.curDepth-- }() + // We check if we should check for custom functions if customFunctions && e.IsValid() && e.CanAddr() { err := f.setCustom(e.Addr()) - if err == nil { - return nil + if err != nil { + return err } } @@ -363,18 +389,11 @@ func (f *ConsumeFuzzer) GetUint16() (uint16, error) { } func (f *ConsumeFuzzer) GetUint32() (uint32, error) { - u32, err := f.GetNBytes(4) + i, err := f.GetInt() if err != nil { - return 0, err + return uint32(0), err } - littleEndian, err := f.GetBool() - if err != nil { - return 0, err - } - if littleEndian { - return binary.LittleEndian.Uint32(u32), nil - } - return binary.BigEndian.Uint32(u32), nil + return uint32(i), nil } func (f *ConsumeFuzzer) GetUint64() (uint64, error) { @@ -431,7 +450,7 @@ func (f *ConsumeFuzzer) GetString() (string, error) { if f.position > MaxTotalLen { return "nil", errors.New("created too large a string") } - byteBegin := f.position - 1 + byteBegin := f.position if byteBegin >= f.dataTotal { return "nil", errors.New("not enough bytes to create string") } @@ -476,7 +495,7 @@ func returnTarBytes(buf []byte) ([]byte, error) { } fileCounter++ } - if fileCounter > 4 { + if fileCounter >= 1 { return buf, nil } return nil, fmt.Errorf("not enough files were created\n") @@ -546,27 +565,77 @@ func setTarHeaderTypeflag(hdr *tar.Header, f *ConsumeFuzzer) error { return nil } +func tooSmallFileBody(length uint32) bool { + if length < 2 { + return true + } + if length < 4 { + return true + } + if length < 10 { + return true + } + if length < 100 { + return true + } + if length < 500 { + return true + } + if length < 1000 { + return true + } + if length < 2000 { + return true + } + if length < 4000 { + return true + } + if length < 8000 { + return true + } + if length < 16000 { + return true + } + if length < 32000 { + return true + } + if length < 64000 { + return true + } + if length < 128000 { + return true + } + if length < 264000 { + return true + } + return false +} + func (f *ConsumeFuzzer) 
createTarFileBody() ([]byte, error) { length, err := f.GetUint32() if err != nil { return nil, errors.New("not enough bytes to create byte array") } + shouldUseLargeFileBody, err := f.GetBool() + if err != nil { + return nil, errors.New("not enough bytes to check long file body") + } + + if shouldUseLargeFileBody && tooSmallFileBody(length) { + return nil, errors.New("File body was too small") + } + // A bit of optimization to attempt to create a file body // when we don't have as many bytes left as "length" remainingBytes := f.dataTotal - f.position if remainingBytes == 0 { return nil, errors.New("created too large a string") } - if remainingBytes < 50 { - length = length % remainingBytes - } else if f.dataTotal < 500 { - length = length % f.dataTotal - } if f.position+length > MaxTotalLen { return nil, errors.New("created too large a string") } - byteBegin := f.position - 1 + byteBegin := f.position if byteBegin >= f.dataTotal { return nil, errors.New("not enough bytes to create byte array") } @@ -606,7 +675,7 @@ func (f *ConsumeFuzzer) getTarFilename() (string, error) { if f.position > MaxTotalLen { return "nil", errors.New("created too large a string") } - byteBegin := f.position - 1 + byteBegin := f.position if byteBegin >= f.dataTotal { return "nil", errors.New("not enough bytes to create string") } @@ -669,7 +738,7 @@ func (f *ConsumeFuzzer) TarBytes() ([]byte, error) { return returnTarBytes(buf.Bytes()) } } - return returnTarBytes(buf.Bytes()) + return buf.Bytes(), nil } // CreateFiles creates pseudo-random files in rootDir. diff --git a/vendor/github.com/AdaLogics/go-fuzz-headers/funcs.go b/vendor/github.com/AdaLogics/go-fuzz-headers/funcs.go index 40273c13a8fef..8ca3a61b8787b 100644 --- a/vendor/github.com/AdaLogics/go-fuzz-headers/funcs.go +++ b/vendor/github.com/AdaLogics/go-fuzz-headers/funcs.go @@ -1,3 +1,17 @@ +// Copyright 2023 The go-fuzz-headers Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package gofuzzheaders import ( diff --git a/vendor/github.com/AdaLogics/go-fuzz-headers/sql.go b/vendor/github.com/AdaLogics/go-fuzz-headers/sql.go index 9290aac3a4944..2afd49f84819f 100644 --- a/vendor/github.com/AdaLogics/go-fuzz-headers/sql.go +++ b/vendor/github.com/AdaLogics/go-fuzz-headers/sql.go @@ -1,3 +1,17 @@ +// Copyright 2023 The go-fuzz-headers Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package gofuzzheaders import ( diff --git a/vendor/go.opentelemetry.io/otel/internal/metric/LICENSE b/vendor/github.com/AdamKorcz/go-118-fuzz-build/LICENSE similarity index 100% rename from vendor/go.opentelemetry.io/otel/internal/metric/LICENSE rename to vendor/github.com/AdamKorcz/go-118-fuzz-build/LICENSE diff --git a/vendor/github.com/AdamKorcz/go-118-fuzz-build/testing/f.go b/vendor/github.com/AdamKorcz/go-118-fuzz-build/testing/f.go new file mode 100644 index 0000000000000..3f4d9aeb61dc3 --- /dev/null +++ b/vendor/github.com/AdamKorcz/go-118-fuzz-build/testing/f.go @@ -0,0 +1,207 @@ +package testing + +import ( + "fmt" + fuzz "github.com/AdaLogics/go-fuzz-headers" + "os" + "reflect" +) + +type F struct { + Data []byte + T *T + FuzzFunc func(*T, any) +} + +func (f *F) CleanupTempDirs() { + f.T.CleanupTempDirs() +} + +func (f *F) Add(args ...any) {} +func (c *F) Cleanup(f func()) {} +func (c *F) Error(args ...any) {} +func (c *F) Errorf(format string, args ...any) {} +func (f *F) Fail() {} +func (c *F) FailNow() {} +func (c *F) Failed() bool { return false } +func (c *F) Fatal(args ...any) {} +func (c *F) Fatalf(format string, args ...any) {} +func (f *F) Fuzz(ff any) { + // we are assuming that ff is a func. + // TODO: Add a check for UX purposes + + fn := reflect.ValueOf(ff) + fnType := fn.Type() + var types []reflect.Type + for i := 1; i < fnType.NumIn(); i++ { + t := fnType.In(i) + + types = append(types, t) + } + args := []reflect.Value{reflect.ValueOf(f.T)} + fuzzConsumer := fuzz.NewConsumer(f.Data) + for _, v := range types { + switch v.String() { + case "[]uint8": + b, err := fuzzConsumer.GetBytes() + if err != nil { + return + } + newBytes := reflect.New(v) + newBytes.Elem().SetBytes(b) + args = append(args, newBytes.Elem()) + case "string": + s, err := fuzzConsumer.GetString() + if err != nil { + return + } + newString := reflect.New(v) + newString.Elem().SetString(s) + args = append(args, newString.Elem()) + case "int": + randInt, err := fuzzConsumer.GetInt() + if err != nil { + return + } + newInt := reflect.New(v) + newInt.Elem().SetInt(int64(randInt)) + args = append(args, newInt.Elem()) + case "int8": + randInt, err := fuzzConsumer.GetInt() + if err != nil { + return + } + newInt := reflect.New(v) + newInt.Elem().SetInt(int64(randInt)) + args = append(args, newInt.Elem()) + case "int16": + randInt, err := fuzzConsumer.GetInt() + if err != nil { + return + } + newInt := reflect.New(v) + newInt.Elem().SetInt(int64(randInt)) + args = append(args, newInt.Elem()) + case "int32": + randInt, err := fuzzConsumer.GetInt() + if err != nil { + return + } + newInt := reflect.New(v) + newInt.Elem().SetInt(int64(randInt)) + args = append(args, newInt.Elem()) + case "int64": + randInt, err := fuzzConsumer.GetInt() + if err != nil { + return + } + newInt := reflect.New(v) + newInt.Elem().SetInt(int64(randInt)) + args = append(args, newInt.Elem()) + case "uint": + randInt, err := fuzzConsumer.GetInt() + if err != nil { + return + } + newUint := reflect.New(v) + newUint.Elem().SetUint(uint64(randInt)) + args = append(args, newUint.Elem()) + case "uint8": + randInt, err := fuzzConsumer.GetInt() + if err != nil { + return + } + newUint := reflect.New(v) + newUint.Elem().SetUint(uint64(randInt)) + args = append(args, newUint.Elem()) + case "uint16": + randInt, err := fuzzConsumer.GetUint16() + if err != nil { + return + } + newUint16 := reflect.New(v) + newUint16.Elem().SetUint(uint64(randInt)) + args = append(args, newUint16.Elem()) + case "uint32": + randInt, err := 
fuzzConsumer.GetUint32() + if err != nil { + return + } + newUint32 := reflect.New(v) + newUint32.Elem().SetUint(uint64(randInt)) + args = append(args, newUint32.Elem()) + case "uint64": + randInt, err := fuzzConsumer.GetUint64() + if err != nil { + return + } + newUint64 := reflect.New(v) + newUint64.Elem().SetUint(uint64(randInt)) + args = append(args, newUint64.Elem()) + case "rune": + randRune, err := fuzzConsumer.GetRune() + if err != nil { + return + } + newRune := reflect.New(v) + newRune.Elem().Set(reflect.ValueOf(randRune)) + args = append(args, newRune.Elem()) + case "float32": + randFloat, err := fuzzConsumer.GetFloat32() + if err != nil { + return + } + newFloat := reflect.New(v) + newFloat.Elem().Set(reflect.ValueOf(randFloat)) + args = append(args, newFloat.Elem()) + case "float64": + randFloat, err := fuzzConsumer.GetFloat64() + if err != nil { + return + } + newFloat := reflect.New(v) + newFloat.Elem().Set(reflect.ValueOf(randFloat)) + args = append(args, newFloat.Elem()) + case "bool": + randBool, err := fuzzConsumer.GetBool() + if err != nil { + return + } + newBool := reflect.New(v) + newBool.Elem().Set(reflect.ValueOf(randBool)) + args = append(args, newBool.Elem()) + default: + fmt.Println(v.String()) + } + } + fn.Call(args) +} +func (f *F) Helper() {} +func (c *F) Log(args ...any) { + fmt.Println(args...) +} +func (c *F) Logf(format string, args ...any) { + fmt.Println(format, args) +} +func (c *F) Name() string { return "libFuzzer" } +func (c *F) Setenv(key, value string) {} +func (c *F) Skip(args ...any) { + panic("GO-FUZZ-BUILD-PANIC") +} +func (c *F) SkipNow() { + panic("GO-FUZZ-BUILD-PANIC") +} +func (c *F) Skipf(format string, args ...any) { + panic("GO-FUZZ-BUILD-PANIC") +} +func (f *F) Skipped() bool { return false } + +func (f *F) TempDir() string { + dir, err := os.MkdirTemp("", "fuzzdir-") + if err != nil { + panic(err) + } + f.T.TempDirs = append(f.T.TempDirs, dir) + + return dir +} diff --git a/vendor/github.com/AdamKorcz/go-118-fuzz-build/testing/t.go b/vendor/github.com/AdamKorcz/go-118-fuzz-build/testing/t.go new file mode 100644 index 0000000000000..885fdb3be60e8 --- /dev/null +++ b/vendor/github.com/AdamKorcz/go-118-fuzz-build/testing/t.go @@ -0,0 +1,129 @@ +package testing + +import ( + "fmt" + "os" + "strings" + "time" +) + +// T can be used to terminate the current fuzz iteration +// without terminating the whole fuzz run. To do so, simply +// panic with the text "GO-FUZZ-BUILD-PANIC" and the fuzzer +// will recover. +type T struct { + TempDirs []string +} + +func NewT() *T { + tempDirs := make([]string, 0) + return &T{TempDirs: tempDirs} +} + +func unsupportedApi(name string) string { + plsOpenIss := "Please open an issue https://github.com/AdamKorcz/go-118-fuzz-build if you need this feature." + var b strings.Builder + b.WriteString(fmt.Sprintf("%s is not supported when fuzzing in libFuzzer mode\n.", name)) + b.WriteString(plsOpenIss) + return b.String() +} + +func (t *T) Cleanup(f func()) { + f() +} + +func (t *T) Deadline() (deadline time.Time, ok bool) { + panic(unsupportedApi("t.Deadline()")) +} + +func (t *T) Error(args ...any) { + fmt.Println(args...) + panic("error") +} + +func (t *T) Errorf(format string, args ...any) { + fmt.Printf(format+"\n", args...) 
+ panic("errorf") +} + +func (t *T) Fail() { + panic("Called T.Fail()") +} + +func (t *T) FailNow() { + panic("Called T.Fail()") + panic(unsupportedApi("t.FailNow()")) +} + +func (t *T) Failed() bool { + panic(unsupportedApi("t.Failed()")) +} + +func (t *T) Fatal(args ...any) { + fmt.Println(args...) + panic("fatal") +} +func (t *T) Fatalf(format string, args ...any) { + fmt.Printf(format+"\n", args...) + panic("fatal") +} +func (t *T) Helper() { + // We can't support it, but it also just impacts how failures are reported, so we can ignore it +} +func (t *T) Log(args ...any) { + fmt.Println(args...) +} + +func (t *T) Logf(format string, args ...any) { + fmt.Println(format) + fmt.Println(args...) +} + +func (t *T) Name() string { + return "libFuzzer" +} + +func (t *T) Parallel() { + panic(unsupportedApi("t.Parallel()")) +} +func (t *T) Run(name string, f func(t *T)) bool { + panic(unsupportedApi("t.Run()")) +} + +func (t *T) Setenv(key, value string) { + +} + +func (t *T) Skip(args ...any) { + panic("GO-FUZZ-BUILD-PANIC") +} +func (t *T) SkipNow() { + panic("GO-FUZZ-BUILD-PANIC") +} + +// Is not really supported. We just skip instead +// of printing any message. A log message can be +// added if need be. +func (t *T) Skipf(format string, args ...any) { + panic("GO-FUZZ-BUILD-PANIC") +} +func (t *T) Skipped() bool { + panic(unsupportedApi("t.Skipped()")) +} +func (t *T) TempDir() string { + dir, err := os.MkdirTemp("", "fuzzdir-") + if err != nil { + panic(err) + } + t.TempDirs = append(t.TempDirs, dir) + + return dir +} + +func (t *T) CleanupTempDirs() { + if len(t.TempDirs) > 0 { + for _, tempDir := range t.TempDirs { + os.RemoveAll(tempDir) + } + } +} diff --git a/vendor/github.com/AdamKorcz/go-118-fuzz-build/testing/unsupported_funcs.go b/vendor/github.com/AdamKorcz/go-118-fuzz-build/testing/unsupported_funcs.go new file mode 100644 index 0000000000000..0e37c1fe20ed1 --- /dev/null +++ b/vendor/github.com/AdamKorcz/go-118-fuzz-build/testing/unsupported_funcs.go @@ -0,0 +1,42 @@ +package testing + +import ( + "testing" +) + +func AllocsPerRun(runs int, f func()) (avg float64) { + panic(unsupportedApi("testing.AllocsPerRun")) +} +func CoverMode() string { + panic(unsupportedApi("testing.CoverMode")) +} +func Coverage() float64 { + panic(unsupportedApi("testing.Coverage")) +} +func Init() { + panic(unsupportedApi("testing.Init")) + +} +func RegisterCover(c testing.Cover) { + panic(unsupportedApi("testing.RegisterCover")) +} +func RunExamples(matchString func(pat, str string) (bool, error), examples []testing.InternalExample) (ok bool) { + panic(unsupportedApi("testing.RunExamples")) +} + +func RunTests(matchString func(pat, str string) (bool, error), tests []testing.InternalTest) (ok bool) { + panic(unsupportedApi("testing.RunTests")) +} + +func Short() bool { + panic(unsupportedApi("testing.Short")) +} + +func Verbose() bool { + panic(unsupportedApi("testing.Verbose")) +} + +type M struct {} +func (m *M) Run() (code int) { + panic("testing.M is not support in libFuzzer Mode") +} \ No newline at end of file diff --git a/vendor/github.com/Microsoft/go-winio/.gitattributes b/vendor/github.com/Microsoft/go-winio/.gitattributes new file mode 100644 index 0000000000000..94f480de94e1d --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/.gitattributes @@ -0,0 +1 @@ +* text=auto eol=lf \ No newline at end of file diff --git a/vendor/github.com/Microsoft/go-winio/.gitignore b/vendor/github.com/Microsoft/go-winio/.gitignore index b883f1fdc6d69..815e20660e5d6 100644 --- 
a/vendor/github.com/Microsoft/go-winio/.gitignore +++ b/vendor/github.com/Microsoft/go-winio/.gitignore @@ -1 +1,10 @@ +.vscode/ + *.exe + +# testing +testdata + +# go workspaces +go.work +go.work.sum diff --git a/vendor/github.com/Microsoft/go-winio/.golangci.yml b/vendor/github.com/Microsoft/go-winio/.golangci.yml new file mode 100644 index 0000000000000..af403bb13a087 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/.golangci.yml @@ -0,0 +1,144 @@ +run: + skip-dirs: + - pkg/etw/sample + +linters: + enable: + # style + - containedctx # struct contains a context + - dupl # duplicate code + - errname # erorrs are named correctly + - goconst # strings that should be constants + - godot # comments end in a period + - misspell + - nolintlint # "//nolint" directives are properly explained + - revive # golint replacement + - stylecheck # golint replacement, less configurable than revive + - unconvert # unnecessary conversions + - wastedassign + + # bugs, performance, unused, etc ... + - contextcheck # function uses a non-inherited context + - errorlint # errors not wrapped for 1.13 + - exhaustive # check exhaustiveness of enum switch statements + - gofmt # files are gofmt'ed + - gosec # security + - nestif # deeply nested ifs + - nilerr # returns nil even with non-nil error + - prealloc # slices that can be pre-allocated + - structcheck # unused struct fields + - unparam # unused function params + +issues: + exclude-rules: + # err is very often shadowed in nested scopes + - linters: + - govet + text: '^shadow: declaration of "err" shadows declaration' + + # ignore long lines for skip autogen directives + - linters: + - revive + text: "^line-length-limit: " + source: "^//(go:generate|sys) " + + # allow unjustified ignores of error checks in defer statements + - linters: + - nolintlint + text: "^directive `//nolint:errcheck` should provide explanation" + source: '^\s*defer ' + + # allow unjustified ignores of error lints for io.EOF + - linters: + - nolintlint + text: "^directive `//nolint:errorlint` should provide explanation" + source: '[=|!]= io.EOF' + + +linters-settings: + govet: + enable-all: true + disable: + # struct order is often for Win32 compat + # also, ignore pointer bytes/GC issues for now until performance becomes an issue + - fieldalignment + check-shadowing: true + nolintlint: + allow-leading-space: false + require-explanation: true + require-specific: true + revive: + # revive is more configurable than static check, so likely the preferred alternative to static-check + # (once the perf issue is solved: https://github.com/golangci/golangci-lint/issues/2997) + enable-all-rules: + true + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md + rules: + # rules with required arguments + - name: argument-limit + disabled: true + - name: banned-characters + disabled: true + - name: cognitive-complexity + disabled: true + - name: cyclomatic + disabled: true + - name: file-header + disabled: true + - name: function-length + disabled: true + - name: function-result-limit + disabled: true + - name: max-public-structs + disabled: true + # geneally annoying rules + - name: add-constant # complains about any and all strings and integers + disabled: true + - name: confusing-naming # we frequently use "Foo()" and "foo()" together + disabled: true + - name: flag-parameter # excessive, and a common idiom we use + disabled: true + # general config + - name: line-length-limit + arguments: + - 140 + - name: var-naming + arguments: + - [] + - - CID + - CRI + - CTRD + - DACL + 
- DLL + - DOS + - ETW + - FSCTL + - GCS + - GMSA + - HCS + - HV + - IO + - LCOW + - LDAP + - LPAC + - LTSC + - MMIO + - NT + - OCI + - PMEM + - PWSH + - RX + - SACl + - SID + - SMB + - TX + - VHD + - VHDX + - VMID + - VPCI + - WCOW + - WIM + stylecheck: + checks: + - "all" + - "-ST1003" # use revive's var naming diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md index 683be1dcf9c8a..7474b4f0b653a 100644 --- a/vendor/github.com/Microsoft/go-winio/README.md +++ b/vendor/github.com/Microsoft/go-winio/README.md @@ -13,16 +13,60 @@ Please see the LICENSE file for licensing information. ## Contributing -This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) -declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com. +This project welcomes contributions and suggestions. +Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that +you have the right to, and actually do, grant us the rights to use your contribution. +For details, visit [Microsoft CLA](https://cla.microsoft.com). -When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR -appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. +When you submit a pull request, a CLA-bot will automatically determine whether you need to +provide a CLA and decorate the PR appropriately (e.g., label, comment). +Simply follow the instructions provided by the bot. +You will only need to do this once across all repos using our CLA. -We also require that contributors sign their commits using git commit -s or git commit --signoff to certify they either authored the work themselves -or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for more info, as well as to make sure that you can -attest to the rules listed. Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off. +Additionally, the pull request pipeline requires the following steps to be performed before +mergining. +### Code Sign-Off + +We require that contributors sign their commits using [`git commit --signoff`][git-commit-s] +to certify they either authored the work themselves or otherwise have permission to use it in this project. + +A range of commits can be signed off using [`git rebase --signoff`][git-rebase-s]. + +Please see [the developer certificate](https://developercertificate.org) for more info, +as well as to make sure that you can attest to the rules listed. +Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off. + +### Linting + +Code must pass a linting stage, which uses [`golangci-lint`][lint]. +The linting settings are stored in [`.golangci.yaml`](./.golangci.yaml), and can be run +automatically with VSCode by adding the following to your workspace or folder settings: + +```json + "go.lintTool": "golangci-lint", + "go.lintOnSave": "package", +``` + +Additional editor [integrations options are also available][lint-ide]. + +Alternatively, `golangci-lint` can be [installed locally][lint-install] and run from the repo root: + +```shell +# use . 
or specify a path to only lint a package +# to show all lint errors, use flags "--max-issues-per-linter=0 --max-same-issues=0" +> golangci-lint run ./... +``` + +### Go Generate + +The pipeline checks that auto-generated code, via `go generate`, are up to date. + +This can be done for the entire repo: + +```shell +> go generate ./... +``` ## Code of Conduct @@ -30,8 +74,16 @@ This project has adopted the [Microsoft Open Source Code of Conduct](https://ope For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. +## Special Thanks +Thanks to [natefinch][natefinch] for the inspiration for this library. +See [npipe](https://github.com/natefinch/npipe) for another named pipe implementation. -## Special Thanks -Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe -for another named pipe implementation. +[lint]: https://golangci-lint.run/ +[lint-ide]: https://golangci-lint.run/usage/integrations/#editor-integration +[lint-install]: https://golangci-lint.run/usage/install/#local-installation + +[git-commit-s]: https://git-scm.com/docs/git-commit#Documentation/git-commit.txt--s +[git-rebase-s]: https://git-scm.com/docs/git-rebase#Documentation/git-rebase.txt---signoff + +[natefinch]: https://github.com/natefinch diff --git a/vendor/github.com/Microsoft/go-winio/SECURITY.md b/vendor/github.com/Microsoft/go-winio/SECURITY.md new file mode 100644 index 0000000000000..869fdfe2b2469 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/SECURITY.md @@ -0,0 +1,41 @@ + + +## Security + +Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). + +If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. + +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). + +If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). + +You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). + +Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: + + * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) 
+ * Full paths of source file(s) related to the manifestation of the issue + * The location of the affected source code (tag/branch/commit or direct URL) + * Any special configuration required to reproduce the issue + * Step-by-step instructions to reproduce the issue + * Proof-of-concept or exploit code (if possible) + * Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + +If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. + +## Preferred Languages + +We prefer all communications to be in English. + +## Policy + +Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). + + diff --git a/vendor/github.com/Microsoft/go-winio/backup.go b/vendor/github.com/Microsoft/go-winio/backup.go index 2be34af43106c..09621c88463a5 100644 --- a/vendor/github.com/Microsoft/go-winio/backup.go +++ b/vendor/github.com/Microsoft/go-winio/backup.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winio @@ -7,11 +8,12 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "runtime" "syscall" "unicode/utf16" + + "golang.org/x/sys/windows" ) //sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead @@ -24,7 +26,7 @@ const ( BackupAlternateData BackupLink BackupPropertyData - BackupObjectId + BackupObjectId //revive:disable-line:var-naming ID, not Id BackupReparseData BackupSparseBlock BackupTxfsData @@ -34,14 +36,16 @@ const ( StreamSparseAttributes = uint32(8) ) +//nolint:revive // var-naming: ALL_CAPS const ( - WRITE_DAC = 0x40000 - WRITE_OWNER = 0x80000 - ACCESS_SYSTEM_SECURITY = 0x1000000 + WRITE_DAC = windows.WRITE_DAC + WRITE_OWNER = windows.WRITE_OWNER + ACCESS_SYSTEM_SECURITY = windows.ACCESS_SYSTEM_SECURITY ) // BackupHeader represents a backup stream of a file. type BackupHeader struct { + //revive:disable-next-line:var-naming ID, not Id Id uint32 // The backup stream ID Attributes uint32 // Stream attributes Size int64 // The size of the stream in bytes @@ -49,8 +53,8 @@ type BackupHeader struct { Offset int64 // The offset of the stream in the file (for BackupSparseBlock only). } -type win32StreamId struct { - StreamId uint32 +type win32StreamID struct { + StreamID uint32 Attributes uint32 Size uint64 NameSize uint32 @@ -71,7 +75,7 @@ func NewBackupStreamReader(r io.Reader) *BackupStreamReader { // Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if // it was not completely read. func (r *BackupStreamReader) Next() (*BackupHeader, error) { - if r.bytesLeft > 0 { + if r.bytesLeft > 0 { //nolint:nestif // todo: flatten this if s, ok := r.r.(io.Seeker); ok { // Make sure Seek on io.SeekCurrent sometimes succeeds // before trying the actual seek. 
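As an aside on the BackupStreamReader hunks touched here, a minimal Windows-only sketch of how the reader is typically driven; the dumpBackupStreams helper is illustrative, and the input reader is assumed to carry a Win32 backup stream (for example, one produced by winio.NewBackupFileReader):

```go
//go:build windows

package example

import (
	"fmt"
	"io"

	"github.com/Microsoft/go-winio"
)

// dumpBackupStreams walks every stream in a Win32 backup stream sequence,
// reporting data and alternate data streams and skipping everything else.
func dumpBackupStreams(r io.Reader) error {
	br := winio.NewBackupStreamReader(r)
	for {
		hdr, err := br.Next()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		switch hdr.Id {
		case winio.BackupData:
			// The reader exposes the current stream's contents via Read.
			n, err := io.Copy(io.Discard, br)
			if err != nil {
				return err
			}
			fmt.Printf("data stream: %d bytes\n", n)
		case winio.BackupAlternateData:
			fmt.Printf("alternate data stream %q: %d bytes\n", hdr.Name, hdr.Size)
		default:
			// Security, EA, and reparse streams are skipped here; Next
			// discards any unread remainder of the current stream.
		}
	}
}
```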
@@ -82,16 +86,16 @@ func (r *BackupStreamReader) Next() (*BackupHeader, error) { r.bytesLeft = 0 } } - if _, err := io.Copy(ioutil.Discard, r); err != nil { + if _, err := io.Copy(io.Discard, r); err != nil { return nil, err } } - var wsi win32StreamId + var wsi win32StreamID if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil { return nil, err } hdr := &BackupHeader{ - Id: wsi.StreamId, + Id: wsi.StreamID, Attributes: wsi.Attributes, Size: int64(wsi.Size), } @@ -102,7 +106,7 @@ func (r *BackupStreamReader) Next() (*BackupHeader, error) { } hdr.Name = syscall.UTF16ToString(name) } - if wsi.StreamId == BackupSparseBlock { + if wsi.StreamID == BackupSparseBlock { if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { return nil, err } @@ -147,8 +151,8 @@ func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error { return fmt.Errorf("missing %d bytes", w.bytesLeft) } name := utf16.Encode([]rune(hdr.Name)) - wsi := win32StreamId{ - StreamId: hdr.Id, + wsi := win32StreamID{ + StreamID: hdr.Id, Attributes: hdr.Attributes, Size: uint64(hdr.Size), NameSize: uint32(len(name) * 2), @@ -203,7 +207,7 @@ func (r *BackupFileReader) Read(b []byte) (int, error) { var bytesRead uint32 err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) if err != nil { - return 0, &os.PathError{"BackupRead", r.f.Name(), err} + return 0, &os.PathError{Op: "BackupRead", Path: r.f.Name(), Err: err} } runtime.KeepAlive(r.f) if bytesRead == 0 { @@ -216,7 +220,7 @@ func (r *BackupFileReader) Read(b []byte) (int, error) { // the underlying file. func (r *BackupFileReader) Close() error { if r.ctx != 0 { - backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) + _ = backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) runtime.KeepAlive(r.f) r.ctx = 0 } @@ -242,7 +246,7 @@ func (w *BackupFileWriter) Write(b []byte) (int, error) { var bytesWritten uint32 err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) if err != nil { - return 0, &os.PathError{"BackupWrite", w.f.Name(), err} + return 0, &os.PathError{Op: "BackupWrite", Path: w.f.Name(), Err: err} } runtime.KeepAlive(w.f) if int(bytesWritten) != len(b) { @@ -255,7 +259,7 @@ func (w *BackupFileWriter) Write(b []byte) (int, error) { // close the underlying file. 
func (w *BackupFileWriter) Close() error { if w.ctx != 0 { - backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) + _ = backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) runtime.KeepAlive(w.f) w.ctx = 0 } @@ -271,7 +275,13 @@ func OpenForBackup(path string, access uint32, share uint32, createmode uint32) if err != nil { return nil, err } - h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0) + h, err := syscall.CreateFile(&winPath[0], + access, + share, + nil, + createmode, + syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, + 0) if err != nil { err = &os.PathError{Op: "open", Path: path, Err: err} return nil, err diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/noop.go b/vendor/github.com/Microsoft/go-winio/backuptar/doc.go similarity index 81% rename from vendor/github.com/Microsoft/go-winio/backuptar/noop.go rename to vendor/github.com/Microsoft/go-winio/backuptar/doc.go index d39eccf0238d1..965d52ab04c56 100644 --- a/vendor/github.com/Microsoft/go-winio/backuptar/noop.go +++ b/vendor/github.com/Microsoft/go-winio/backuptar/doc.go @@ -1,4 +1,3 @@ -// +build !windows // This file only exists to allow go get on non-Windows platforms. package backuptar diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go b/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go index 3416096639918..455fd798eb9c1 100644 --- a/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go +++ b/vendor/github.com/Microsoft/go-winio/backuptar/strconv.go @@ -1,3 +1,5 @@ +//go:build windows + package backuptar import ( diff --git a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go index 2342a7fcd6fb9..6b3b0cd5198ae 100644 --- a/vendor/github.com/Microsoft/go-winio/backuptar/tar.go +++ b/vendor/github.com/Microsoft/go-winio/backuptar/tar.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package backuptar @@ -7,7 +8,6 @@ import ( "encoding/base64" "fmt" "io" - "io/ioutil" "path/filepath" "strconv" "strings" @@ -18,17 +18,18 @@ import ( "golang.org/x/sys/windows" ) +//nolint:deadcode,varcheck // keep unused constants for potential future use const ( - c_ISUID = 04000 // Set uid - c_ISGID = 02000 // Set gid - c_ISVTX = 01000 // Save text (sticky bit) - c_ISDIR = 040000 // Directory - c_ISFIFO = 010000 // FIFO - c_ISREG = 0100000 // Regular file - c_ISLNK = 0120000 // Symbolic link - c_ISBLK = 060000 // Block special file - c_ISCHR = 020000 // Character special file - c_ISSOCK = 0140000 // Socket + cISUID = 0004000 // Set uid + cISGID = 0002000 // Set gid + cISVTX = 0001000 // Save text (sticky bit) + cISDIR = 0040000 // Directory + cISFIFO = 0010000 // FIFO + cISREG = 0100000 // Regular file + cISLNK = 0120000 // Symbolic link + cISBLK = 0060000 // Block special file + cISCHR = 0020000 // Character special file + cISSOCK = 0140000 // Socket ) const ( @@ -44,7 +45,7 @@ const ( // zeroReader is an io.Reader that always returns 0s. 
type zeroReader struct{} -func (zr zeroReader) Read(b []byte) (int, error) { +func (zeroReader) Read(b []byte) (int, error) { for i := range b { b[i] = 0 } @@ -55,7 +56,7 @@ func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error { curOffset := int64(0) for { bhdr, err := br.Next() - if err == io.EOF { + if err == io.EOF { //nolint:errorlint err = io.ErrUnexpectedEOF } if err != nil { @@ -71,8 +72,8 @@ func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error { } // archive/tar does not support writing sparse files // so just write zeroes to catch up to the current offset. - if _, err := io.CopyN(t, zeroReader{}, bhdr.Offset-curOffset); err != nil { - return fmt.Errorf("seek to offset %d: %s", bhdr.Offset, err) + if _, err = io.CopyN(t, zeroReader{}, bhdr.Offset-curOffset); err != nil { + return fmt.Errorf("seek to offset %d: %w", bhdr.Offset, err) } if bhdr.Size == 0 { // A sparse block with size = 0 is used to mark the end of the sparse blocks. @@ -106,7 +107,7 @@ func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *ta hdr.PAXRecords[hdrCreationTime] = formatPAXTime(time.Unix(0, fileInfo.CreationTime.Nanoseconds())) if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { - hdr.Mode |= c_ISDIR + hdr.Mode |= cISDIR hdr.Size = 0 hdr.Typeflag = tar.TypeDir } @@ -116,32 +117,29 @@ func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *ta // SecurityDescriptorFromTarHeader reads the SDDL associated with the header of the current file // from the tar header and returns the security descriptor into a byte slice. func SecurityDescriptorFromTarHeader(hdr *tar.Header) ([]byte, error) { - // Maintaining old SDDL-based behavior for backward - // compatibility. All new tar headers written by this library - // will have raw binary for the security descriptor. - var sd []byte - var err error - if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok { - sd, err = winio.SddlToSecurityDescriptor(sddl) - if err != nil { - return nil, err - } - } if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok { - sd, err = base64.StdEncoding.DecodeString(sdraw) + sd, err := base64.StdEncoding.DecodeString(sdraw) if err != nil { + // Not returning sd as-is in the error-case, as base64.DecodeString + // may return partially decoded data (not nil or empty slice) in case + // of a failure: https://github.com/golang/go/blob/go1.17.7/src/encoding/base64/base64.go#L382-L387 return nil, err } + return sd, nil } - return sd, nil + // Maintaining old SDDL-based behavior for backward compatibility. All new + // tar headers written by this library will have raw binary for the security + // descriptor. + if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok { + return winio.SddlToSecurityDescriptor(sddl) + } + return nil, nil } // ExtendedAttributesFromTarHeader reads the EAs associated with the header of the // current file from the tar header and returns it as a byte slice. 
func ExtendedAttributesFromTarHeader(hdr *tar.Header) ([]byte, error) { - var eas []winio.ExtendedAttribute - var eadata []byte - var err error + var eas []winio.ExtendedAttribute //nolint:prealloc // len(eas) <= len(hdr.PAXRecords); prealloc is wasteful for k, v := range hdr.PAXRecords { if !strings.HasPrefix(k, hdrEaPrefix) { continue @@ -155,13 +153,15 @@ func ExtendedAttributesFromTarHeader(hdr *tar.Header) ([]byte, error) { Value: data, }) } + var eaData []byte + var err error if len(eas) != 0 { - eadata, err = winio.EncodeExtendedAttributes(eas) + eaData, err = winio.EncodeExtendedAttributes(eas) if err != nil { return nil, err } } - return eadata, nil + return eaData, nil } // EncodeReparsePointFromTarHeader reads the ReparsePoint structure from the tar header @@ -182,11 +182,9 @@ func EncodeReparsePointFromTarHeader(hdr *tar.Header) []byte { // // The additional Win32 metadata is: // -// MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value -// -// MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format -// -// MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink) +// - MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value +// - MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format +// - MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink) func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error { name = filepath.ToSlash(name) hdr := BasicInfoHeader(name, size, fileInfo) @@ -209,7 +207,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size var dataHdr *winio.BackupHeader for dataHdr == nil { bhdr, err := br.Next() - if err == io.EOF { + if err == io.EOF { //nolint:errorlint break } if err != nil { @@ -217,21 +215,21 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size } switch bhdr.Id { case winio.BackupData: - hdr.Mode |= c_ISREG + hdr.Mode |= cISREG if !readTwice { dataHdr = bhdr } case winio.BackupSecurity: - sd, err := ioutil.ReadAll(br) + sd, err := io.ReadAll(br) if err != nil { return err } hdr.PAXRecords[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd) case winio.BackupReparseData: - hdr.Mode |= c_ISLNK + hdr.Mode |= cISLNK hdr.Typeflag = tar.TypeSymlink - reparseBuffer, err := ioutil.ReadAll(br) + reparseBuffer, _ := io.ReadAll(br) rp, err := winio.DecodeReparsePoint(reparseBuffer) if err != nil { return err @@ -242,7 +240,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size hdr.Linkname = rp.Target case winio.BackupEaData: - eab, err := ioutil.ReadAll(br) + eab, err := io.ReadAll(br) if err != nil { return err } @@ -276,7 +274,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size } for dataHdr == nil { bhdr, err := br.Next() - if err == io.EOF { + if err == io.EOF { //nolint:errorlint break } if err != nil { @@ -311,7 +309,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size // range of the file containing the range contents. Finally there is a sparse block stream with // size = 0 and offset = . - if dataHdr != nil { + if dataHdr != nil { //nolint:nestif // todo: reduce nesting complexity // A data stream was found. Copy the data. // We assume that we will either have a data stream size > 0 XOR have sparse block streams. 
if dataHdr.Size > 0 || (dataHdr.Attributes&winio.StreamSparseAttributes) == 0 { @@ -319,13 +317,13 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size) } if _, err = io.Copy(t, br); err != nil { - return fmt.Errorf("%s: copying contents from data stream: %s", name, err) + return fmt.Errorf("%s: copying contents from data stream: %w", name, err) } } else if size > 0 { // As of a recent OS change, BackupRead now returns a data stream for empty sparse files. // These files have no sparse block streams, so skip the copySparse call if file size = 0. if err = copySparse(t, br); err != nil { - return fmt.Errorf("%s: copying contents from sparse block stream: %s", name, err) + return fmt.Errorf("%s: copying contents from sparse block stream: %w", name, err) } } } @@ -335,7 +333,7 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size // been written. In practice, this means that we don't get EA or TXF metadata. for { bhdr, err := br.Next() - if err == io.EOF { + if err == io.EOF { //nolint:errorlint break } if err != nil { @@ -343,35 +341,30 @@ func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size } switch bhdr.Id { case winio.BackupAlternateData: - altName := bhdr.Name - if strings.HasSuffix(altName, ":$DATA") { - altName = altName[:len(altName)-len(":$DATA")] - } - if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 { - hdr = &tar.Header{ - Format: hdr.Format, - Name: name + altName, - Mode: hdr.Mode, - Typeflag: tar.TypeReg, - Size: bhdr.Size, - ModTime: hdr.ModTime, - AccessTime: hdr.AccessTime, - ChangeTime: hdr.ChangeTime, - } - err = t.WriteHeader(hdr) - if err != nil { - return err - } - _, err = io.Copy(t, br) - if err != nil { - return err - } - - } else { + if (bhdr.Attributes & winio.StreamSparseAttributes) != 0 { // Unsupported for now, since the size of the alternate stream is not present // in the backup stream until after the data has been read. return fmt.Errorf("%s: tar of sparse alternate data streams is unsupported", name) } + altName := strings.TrimSuffix(bhdr.Name, ":$DATA") + hdr = &tar.Header{ + Format: hdr.Format, + Name: name + altName, + Mode: hdr.Mode, + Typeflag: tar.TypeReg, + Size: bhdr.Size, + ModTime: hdr.ModTime, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + } + err = t.WriteHeader(hdr) + if err != nil { + return err + } + _, err = io.Copy(t, br) + if err != nil { + return err + } case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData: // ignore these streams default: @@ -413,7 +406,7 @@ func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *win } fileInfo.CreationTime = windows.NsecToFiletime(creationTime.UnixNano()) } - return + return name, size, fileInfo, err } // WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. 
Since this function may process multiple @@ -474,7 +467,6 @@ func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) ( if err != nil { return nil, err } - } if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA { diff --git a/vendor/github.com/Microsoft/go-winio/doc.go b/vendor/github.com/Microsoft/go-winio/doc.go new file mode 100644 index 0000000000000..1f5bfe2d548e6 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/doc.go @@ -0,0 +1,22 @@ +// This package provides utilities for efficiently performing Win32 IO operations in Go. +// Currently, this package is provides support for genreal IO and management of +// - named pipes +// - files +// - [Hyper-V sockets] +// +// This code is similar to Go's [net] package, and uses IO completion ports to avoid +// blocking IO on system threads, allowing Go to reuse the thread to schedule other goroutines. +// +// This limits support to Windows Vista and newer operating systems. +// +// Additionally, this package provides support for: +// - creating and managing GUIDs +// - writing to [ETW] +// - opening and manageing VHDs +// - parsing [Windows Image files] +// - auto-generating Win32 API code +// +// [Hyper-V sockets]: https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service +// [ETW]: https://docs.microsoft.com/en-us/windows-hardware/drivers/devtest/event-tracing-for-windows--etw- +// [Windows Image files]: https://docs.microsoft.com/en-us/windows-hardware/manufacture/desktop/work-with-windows-images +package winio diff --git a/vendor/github.com/Microsoft/go-winio/ea.go b/vendor/github.com/Microsoft/go-winio/ea.go index 4051c1b33bfee..e104dbdfdf96f 100644 --- a/vendor/github.com/Microsoft/go-winio/ea.go +++ b/vendor/github.com/Microsoft/go-winio/ea.go @@ -33,7 +33,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info) if err != nil { err = errInvalidEaBuffer - return + return ea, nb, err } nameOffset := fileFullEaInformationSize @@ -43,7 +43,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { nextOffset := int(info.NextEntryOffset) if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) { err = errInvalidEaBuffer - return + return ea, nb, err } ea.Name = string(b[nameOffset : nameOffset+nameLen]) @@ -52,7 +52,7 @@ func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { if info.NextEntryOffset != 0 { nb = b[info.NextEntryOffset:] } - return + return ea, nb, err } // DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION @@ -67,7 +67,7 @@ func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { eas = append(eas, ea) b = nb } - return + return eas, err } func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go index 293ab54c80c3d..175a99d3f429f 100644 --- a/vendor/github.com/Microsoft/go-winio/file.go +++ b/vendor/github.com/Microsoft/go-winio/file.go @@ -11,6 +11,8 @@ import ( "sync/atomic" "syscall" "time" + + "golang.org/x/sys/windows" ) //sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx @@ -24,6 +26,8 @@ type atomicBool int32 func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) } func (b *atomicBool) setTrue() { 
atomic.StoreInt32((*int32)(b), 1) } + +//revive:disable-next-line:predeclared Keep "new" to maintain consistency with "atomic" pkg func (b *atomicBool) swap(new bool) bool { var newInt int32 if new { @@ -32,11 +36,6 @@ func (b *atomicBool) swap(new bool) bool { return atomic.SwapInt32((*int32)(b), newInt) == 1 } -const ( - cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 - cFILE_SKIP_SET_EVENT_ON_HANDLE = 2 -) - var ( ErrFileClosed = errors.New("file has already been closed") ErrTimeout = &timeoutError{} @@ -44,28 +43,28 @@ var ( type timeoutError struct{} -func (e *timeoutError) Error() string { return "i/o timeout" } -func (e *timeoutError) Timeout() bool { return true } -func (e *timeoutError) Temporary() bool { return true } +func (*timeoutError) Error() string { return "i/o timeout" } +func (*timeoutError) Timeout() bool { return true } +func (*timeoutError) Temporary() bool { return true } type timeoutChan chan struct{} var ioInitOnce sync.Once var ioCompletionPort syscall.Handle -// ioResult contains the result of an asynchronous IO operation +// ioResult contains the result of an asynchronous IO operation. type ioResult struct { bytes uint32 err error } -// ioOperation represents an outstanding asynchronous Win32 IO +// ioOperation represents an outstanding asynchronous Win32 IO. type ioOperation struct { o syscall.Overlapped ch chan ioResult } -func initIo() { +func initIO() { h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff) if err != nil { panic(err) @@ -94,15 +93,15 @@ type deadlineHandler struct { timedout atomicBool } -// makeWin32File makes a new win32File from an existing file handle +// makeWin32File makes a new win32File from an existing file handle. func makeWin32File(h syscall.Handle) (*win32File, error) { f := &win32File{handle: h} - ioInitOnce.Do(initIo) + ioInitOnce.Do(initIO) _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) if err != nil { return nil, err } - err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE) + err = setFileCompletionNotificationModes(h, windows.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS|windows.FILE_SKIP_SET_EVENT_ON_HANDLE) if err != nil { return nil, err } @@ -121,14 +120,14 @@ func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { return f, nil } -// closeHandle closes the resources associated with a Win32 handle +// closeHandle closes the resources associated with a Win32 handle. func (f *win32File) closeHandle() { f.wgLock.Lock() // Atomically set that we are closing, releasing the resources only once. if !f.closing.swap(true) { f.wgLock.Unlock() // cancel all IO and wait for it to complete - cancelIoEx(f.handle, nil) + _ = cancelIoEx(f.handle, nil) f.wg.Wait() // at this point, no new IO can start syscall.Close(f.handle) @@ -144,14 +143,14 @@ func (f *win32File) Close() error { return nil } -// IsClosed checks if the file has been closed +// IsClosed checks if the file has been closed. func (f *win32File) IsClosed() bool { return f.closing.isSet() } -// prepareIo prepares for a new IO operation. +// prepareIO prepares for a new IO operation. // The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. 
-func (f *win32File) prepareIo() (*ioOperation, error) { +func (f *win32File) prepareIO() (*ioOperation, error) { f.wgLock.RLock() if f.closing.isSet() { f.wgLock.RUnlock() @@ -164,7 +163,7 @@ func (f *win32File) prepareIo() (*ioOperation, error) { return c, nil } -// ioCompletionProcessor processes completed async IOs forever +// ioCompletionProcessor processes completed async IOs forever. func ioCompletionProcessor(h syscall.Handle) { for { var bytes uint32 @@ -178,15 +177,17 @@ func ioCompletionProcessor(h syscall.Handle) { } } -// asyncIo processes the return value from ReadFile or WriteFile, blocking until +// todo: helsaawy - create an asyncIO version that takes a context + +// asyncIO processes the return value from ReadFile or WriteFile, blocking until // the operation has actually completed. -func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { - if err != syscall.ERROR_IO_PENDING { +func (f *win32File) asyncIO(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { + if err != syscall.ERROR_IO_PENDING { //nolint:errorlint // err is Errno return int(bytes), err } if f.closing.isSet() { - cancelIoEx(f.handle, &c.o) + _ = cancelIoEx(f.handle, &c.o) } var timeout timeoutChan @@ -200,7 +201,7 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er select { case r = <-c.ch: err = r.err - if err == syscall.ERROR_OPERATION_ABORTED { + if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno if f.closing.isSet() { err = ErrFileClosed } @@ -210,10 +211,10 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags) } case <-timeout: - cancelIoEx(f.handle, &c.o) + _ = cancelIoEx(f.handle, &c.o) r = <-c.ch err = r.err - if err == syscall.ERROR_OPERATION_ABORTED { + if err == syscall.ERROR_OPERATION_ABORTED { //nolint:errorlint // err is Errno err = ErrTimeout } } @@ -221,13 +222,14 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er // runtime.KeepAlive is needed, as c is passed via native // code to ioCompletionProcessor, c must remain alive // until the channel read is complete. + // todo: (de)allocate *ioOperation via win32 heap functions, instead of needing to KeepAlive? runtime.KeepAlive(c) return int(r.bytes), err } // Read reads from a file handle. func (f *win32File) Read(b []byte) (int, error) { - c, err := f.prepareIo() + c, err := f.prepareIO() if err != nil { return 0, err } @@ -239,13 +241,13 @@ func (f *win32File) Read(b []byte) (int, error) { var bytes uint32 err = syscall.ReadFile(f.handle, b, &bytes, &c.o) - n, err := f.asyncIo(c, &f.readDeadline, bytes, err) + n, err := f.asyncIO(c, &f.readDeadline, bytes, err) runtime.KeepAlive(b) // Handle EOF conditions. if err == nil && n == 0 && len(b) != 0 { return 0, io.EOF - } else if err == syscall.ERROR_BROKEN_PIPE { + } else if err == syscall.ERROR_BROKEN_PIPE { //nolint:errorlint // err is Errno return 0, io.EOF } else { return n, err @@ -254,7 +256,7 @@ func (f *win32File) Read(b []byte) (int, error) { // Write writes to a file handle. 
func (f *win32File) Write(b []byte) (int, error) { - c, err := f.prepareIo() + c, err := f.prepareIO() if err != nil { return 0, err } @@ -266,7 +268,7 @@ func (f *win32File) Write(b []byte) (int, error) { var bytes uint32 err = syscall.WriteFile(f.handle, b, &bytes, &c.o) - n, err := f.asyncIo(c, &f.writeDeadline, bytes, err) + n, err := f.asyncIO(c, &f.writeDeadline, bytes, err) runtime.KeepAlive(b) return n, err } diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go index 3ab6bff69c5d6..702950e72a49c 100644 --- a/vendor/github.com/Microsoft/go-winio/fileinfo.go +++ b/vendor/github.com/Microsoft/go-winio/fileinfo.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winio @@ -14,13 +15,18 @@ import ( type FileBasicInfo struct { CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime FileAttributes uint32 - pad uint32 // padding + _ uint32 // padding } // GetFileBasicInfo retrieves times and attributes for a file. func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { bi := &FileBasicInfo{} - if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + if err := windows.GetFileInformationByHandleEx( + windows.Handle(f.Fd()), + windows.FileBasicInfo, + (*byte)(unsafe.Pointer(bi)), + uint32(unsafe.Sizeof(*bi)), + ); err != nil { return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} } runtime.KeepAlive(f) @@ -29,7 +35,12 @@ func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { // SetFileBasicInfo sets times and attributes for a file. func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error { - if err := windows.SetFileInformationByHandle(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + if err := windows.SetFileInformationByHandle( + windows.Handle(f.Fd()), + windows.FileBasicInfo, + (*byte)(unsafe.Pointer(bi)), + uint32(unsafe.Sizeof(*bi)), + ); err != nil { return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err} } runtime.KeepAlive(f) @@ -48,7 +59,10 @@ type FileStandardInfo struct { // GetFileStandardInfo retrieves ended information for the file. func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) { si := &FileStandardInfo{} - if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileStandardInfo, (*byte)(unsafe.Pointer(si)), uint32(unsafe.Sizeof(*si))); err != nil { + if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), + windows.FileStandardInfo, + (*byte)(unsafe.Pointer(si)), + uint32(unsafe.Sizeof(*si))); err != nil { return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} } runtime.KeepAlive(f) @@ -65,7 +79,12 @@ type FileIDInfo struct { // GetFileID retrieves the unique (volume, file ID) pair for a file. 
func GetFileID(f *os.File) (*FileIDInfo, error) { fileID := &FileIDInfo{} - if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileIdInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil { + if err := windows.GetFileInformationByHandleEx( + windows.Handle(f.Fd()), + windows.FileIdInfo, + (*byte)(unsafe.Pointer(fileID)), + uint32(unsafe.Sizeof(*fileID)), + ); err != nil { return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} } runtime.KeepAlive(f) diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go index b2b644d002aab..52f1c280f6a75 100644 --- a/vendor/github.com/Microsoft/go-winio/hvsock.go +++ b/vendor/github.com/Microsoft/go-winio/hvsock.go @@ -4,6 +4,8 @@ package winio import ( + "context" + "errors" "fmt" "io" "net" @@ -12,16 +14,87 @@ import ( "time" "unsafe" + "golang.org/x/sys/windows" + + "github.com/Microsoft/go-winio/internal/socket" "github.com/Microsoft/go-winio/pkg/guid" ) -//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind +const afHVSock = 34 // AF_HYPERV -const ( - afHvSock = 34 // AF_HYPERV +// Well known Service and VM IDs +//https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/make-integration-service#vmid-wildcards - socketError = ^uintptr(0) -) +// HvsockGUIDWildcard is the wildcard VmId for accepting connections from all partitions. +func HvsockGUIDWildcard() guid.GUID { // 00000000-0000-0000-0000-000000000000 + return guid.GUID{} +} + +// HvsockGUIDBroadcast is the wildcard VmId for broadcasting sends to all partitions. +func HvsockGUIDBroadcast() guid.GUID { //ffffffff-ffff-ffff-ffff-ffffffffffff + return guid.GUID{ + Data1: 0xffffffff, + Data2: 0xffff, + Data3: 0xffff, + Data4: [8]uint8{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, + } +} + +// HvsockGUIDLoopback is the Loopback VmId for accepting connections to the same partition as the connector. +func HvsockGUIDLoopback() guid.GUID { // e0e16197-dd56-4a10-9195-5ee7a155a838 + return guid.GUID{ + Data1: 0xe0e16197, + Data2: 0xdd56, + Data3: 0x4a10, + Data4: [8]uint8{0x91, 0x95, 0x5e, 0xe7, 0xa1, 0x55, 0xa8, 0x38}, + } +} + +// HvsockGUIDSiloHost is the address of a silo's host partition: +// - The silo host of a hosted silo is the utility VM. +// - The silo host of a silo on a physical host is the physical host. +func HvsockGUIDSiloHost() guid.GUID { // 36bd0c5c-7276-4223-88ba-7d03b654c568 + return guid.GUID{ + Data1: 0x36bd0c5c, + Data2: 0x7276, + Data3: 0x4223, + Data4: [8]byte{0x88, 0xba, 0x7d, 0x03, 0xb6, 0x54, 0xc5, 0x68}, + } +} + +// HvsockGUIDChildren is the wildcard VmId for accepting connections from the connector's child partitions. +func HvsockGUIDChildren() guid.GUID { // 90db8b89-0d35-4f79-8ce9-49ea0ac8b7cd + return guid.GUID{ + Data1: 0x90db8b89, + Data2: 0xd35, + Data3: 0x4f79, + Data4: [8]uint8{0x8c, 0xe9, 0x49, 0xea, 0xa, 0xc8, 0xb7, 0xcd}, + } +} + +// HvsockGUIDParent is the wildcard VmId for accepting connections from the connector's parent partition. +// Listening on this VmId accepts connection from: +// - Inside silos: silo host partition. +// - Inside hosted silo: host of the VM. +// - Inside VM: VM host. +// - Physical host: Not supported. 
+func HvsockGUIDParent() guid.GUID { // a42e7cda-d03f-480c-9cc2-a4de20abb878 + return guid.GUID{ + Data1: 0xa42e7cda, + Data2: 0xd03f, + Data3: 0x480c, + Data4: [8]uint8{0x9c, 0xc2, 0xa4, 0xde, 0x20, 0xab, 0xb8, 0x78}, + } +} + +// hvsockVsockServiceTemplate is the Service GUID used for the VSOCK protocol. +func hvsockVsockServiceTemplate() guid.GUID { // 00000000-facb-11e6-bd58-64006a7986d3 + return guid.GUID{ + Data2: 0xfacb, + Data3: 0x11e6, + Data4: [8]uint8{0xbd, 0x58, 0x64, 0x00, 0x6a, 0x79, 0x86, 0xd3}, + } +} // An HvsockAddr is an address for a AF_HYPERV socket. type HvsockAddr struct { @@ -36,8 +109,10 @@ type rawHvsockAddr struct { ServiceID guid.GUID } +var _ socket.RawSockaddr = &rawHvsockAddr{} + // Network returns the address's network name, "hvsock". -func (addr *HvsockAddr) Network() string { +func (*HvsockAddr) Network() string { return "hvsock" } @@ -47,14 +122,14 @@ func (addr *HvsockAddr) String() string { // VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port. func VsockServiceID(port uint32) guid.GUID { - g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3") + g := hvsockVsockServiceTemplate() // make a copy g.Data1 = port return g } func (addr *HvsockAddr) raw() rawHvsockAddr { return rawHvsockAddr{ - Family: afHvSock, + Family: afHVSock, VMID: addr.VMID, ServiceID: addr.ServiceID, } @@ -65,20 +140,48 @@ func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) { addr.ServiceID = raw.ServiceID } +// Sockaddr returns a pointer to and the size of this struct. +// +// Implements the [socket.RawSockaddr] interface, and allows use in +// [socket.Bind] and [socket.ConnectEx]. +func (r *rawHvsockAddr) Sockaddr() (unsafe.Pointer, int32, error) { + return unsafe.Pointer(r), int32(unsafe.Sizeof(rawHvsockAddr{})), nil +} + +// Sockaddr interface allows use with `sockets.Bind()` and `.ConnectEx()`. +func (r *rawHvsockAddr) FromBytes(b []byte) error { + n := int(unsafe.Sizeof(rawHvsockAddr{})) + + if len(b) < n { + return fmt.Errorf("got %d, want %d: %w", len(b), n, socket.ErrBufferSize) + } + + copy(unsafe.Slice((*byte)(unsafe.Pointer(r)), n), b[:n]) + if r.Family != afHVSock { + return fmt.Errorf("got %d, want %d: %w", r.Family, afHVSock, socket.ErrAddrFamily) + } + + return nil +} + // HvsockListener is a socket listener for the AF_HYPERV address family. type HvsockListener struct { sock *win32File addr HvsockAddr } +var _ net.Listener = &HvsockListener{} + // HvsockConn is a connected socket of the AF_HYPERV address family. type HvsockConn struct { sock *win32File local, remote HvsockAddr } -func newHvSocket() (*win32File, error) { - fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1) +var _ net.Conn = &HvsockConn{} + +func newHVSocket() (*win32File, error) { + fd, err := syscall.Socket(afHVSock, syscall.SOCK_STREAM, 1) if err != nil { return nil, os.NewSyscallError("socket", err) } @@ -94,12 +197,12 @@ func newHvSocket() (*win32File, error) { // ListenHvsock listens for connections on the specified hvsock address. 
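The well-known VmId helpers and VsockServiceID above are the building blocks for an HvsockAddr. A hedged usage sketch (the port 5000 and the no-op handler are placeholders, not taken from the diff), assuming a Windows host where Hyper-V sockets are available:

```go
package main

import (
	"log"

	"github.com/Microsoft/go-winio"
)

func main() {
	// Accept connections from any partition on vsock-style service port 5000.
	addr := &winio.HvsockAddr{
		VMID:      winio.HvsockGUIDWildcard(),
		ServiceID: winio.VsockServiceID(5000),
	}
	l, err := winio.ListenHvsock(addr)
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()

	for {
		conn, err := l.Accept()
		if err != nil {
			log.Print(err)
			return
		}
		go func() { _ = conn.Close() }() // placeholder handler
	}
}
```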
func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) { l := &HvsockListener{addr: *addr} - sock, err := newHvSocket() + sock, err := newHVSocket() if err != nil { return nil, l.opErr("listen", err) } sa := addr.raw() - err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa))) + err = socket.Bind(windows.Handle(sock.handle), &sa) if err != nil { return nil, l.opErr("listen", os.NewSyscallError("socket", err)) } @@ -121,7 +224,7 @@ func (l *HvsockListener) Addr() net.Addr { // Accept waits for the next connection and returns it. func (l *HvsockListener) Accept() (_ net.Conn, err error) { - sock, err := newHvSocket() + sock, err := newHVSocket() if err != nil { return nil, l.opErr("accept", err) } @@ -130,27 +233,42 @@ func (l *HvsockListener) Accept() (_ net.Conn, err error) { sock.Close() } }() - c, err := l.sock.prepareIo() + c, err := l.sock.prepareIO() if err != nil { return nil, l.opErr("accept", err) } defer l.sock.wg.Done() // AcceptEx, per documentation, requires an extra 16 bytes per address. + // + // https://docs.microsoft.com/en-us/windows/win32/api/mswsock/nf-mswsock-acceptex const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{})) var addrbuf [addrlen * 2]byte var bytes uint32 - err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o) - _, err = l.sock.asyncIo(c, nil, bytes, err) - if err != nil { + err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0 /*rxdatalen*/, addrlen, addrlen, &bytes, &c.o) + if _, err = l.sock.asyncIO(c, nil, bytes, err); err != nil { return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) } + conn := &HvsockConn{ sock: sock, } + // The local address returned in the AcceptEx buffer is the same as the Listener socket's + // address. However, the service GUID reported by GetSockName is different from the Listeners + // socket, and is sometimes the same as the local address of the socket that dialed the + // address, with the service GUID.Data1 incremented, but othertimes is different. + // todo: does the local address matter? is the listener's address or the actual address appropriate? conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0]))) conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen]))) + + // initialize the accepted socket and update its properties with those of the listening socket + if err = windows.Setsockopt(windows.Handle(sock.handle), + windows.SOL_SOCKET, windows.SO_UPDATE_ACCEPT_CONTEXT, + (*byte)(unsafe.Pointer(&l.sock.handle)), int32(unsafe.Sizeof(l.sock.handle))); err != nil { + return nil, conn.opErr("accept", os.NewSyscallError("setsockopt", err)) + } + sock = nil return conn, nil } @@ -160,43 +278,171 @@ func (l *HvsockListener) Close() error { return l.sock.Close() } -/* Need to finish ConnectEx handling -func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) { - sock, err := newHvSocket() +// HvsockDialer configures and dials a Hyper-V Socket (ie, [HvsockConn]). +type HvsockDialer struct { + // Deadline is the time the Dial operation must connect before erroring. + Deadline time.Time + + // Retries is the number of additional connects to try if the connection times out, is refused, + // or the host is unreachable + Retries uint + + // RetryWait is the time to wait after a connection error to retry + RetryWait time.Duration + + rt *time.Timer // redial wait timer +} + +// Dial the Hyper-V socket at addr. +// +// See [HvsockDialer.Dial] for more information. 
+func Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) { + return (&HvsockDialer{}).Dial(ctx, addr) +} + +// Dial attempts to connect to the Hyper-V socket at addr, and returns a connection if successful. +// Will attempt (HvsockDialer).Retries if dialing fails, waiting (HvsockDialer).RetryWait between +// retries. +// +// Dialing can be cancelled either by providing (HvsockDialer).Deadline, or cancelling ctx. +func (d *HvsockDialer) Dial(ctx context.Context, addr *HvsockAddr) (conn *HvsockConn, err error) { + op := "dial" + // create the conn early to use opErr() + conn = &HvsockConn{ + remote: *addr, + } + + if !d.Deadline.IsZero() { + var cancel context.CancelFunc + ctx, cancel = context.WithDeadline(ctx, d.Deadline) + defer cancel() + } + + // preemptive timeout/cancellation check + if err = ctx.Err(); err != nil { + return nil, conn.opErr(op, err) + } + + sock, err := newHVSocket() if err != nil { - return nil, err + return nil, conn.opErr(op, err) } defer func() { if sock != nil { sock.Close() } }() - c, err := sock.prepareIo() + + sa := addr.raw() + err = socket.Bind(windows.Handle(sock.handle), &sa) if err != nil { - return nil, err + return nil, conn.opErr(op, os.NewSyscallError("bind", err)) + } + + c, err := sock.prepareIO() + if err != nil { + return nil, conn.opErr(op, err) } defer sock.wg.Done() var bytes uint32 - err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o) - _, err = sock.asyncIo(ctx, c, nil, bytes, err) + for i := uint(0); i <= d.Retries; i++ { + err = socket.ConnectEx( + windows.Handle(sock.handle), + &sa, + nil, // sendBuf + 0, // sendDataLen + &bytes, + (*windows.Overlapped)(unsafe.Pointer(&c.o))) + _, err = sock.asyncIO(c, nil, bytes, err) + if i < d.Retries && canRedial(err) { + if err = d.redialWait(ctx); err == nil { + continue + } + } + break + } if err != nil { - return nil, err + return nil, conn.opErr(op, os.NewSyscallError("connectex", err)) } - conn := &HvsockConn{ - sock: sock, - remote: *addr, + + // update the connection properties, so shutdown can be used + if err = windows.Setsockopt( + windows.Handle(sock.handle), + windows.SOL_SOCKET, + windows.SO_UPDATE_CONNECT_CONTEXT, + nil, // optvalue + 0, // optlen + ); err != nil { + return nil, conn.opErr(op, os.NewSyscallError("setsockopt", err)) + } + + // get the local name + var sal rawHvsockAddr + err = socket.GetSockName(windows.Handle(sock.handle), &sal) + if err != nil { + return nil, conn.opErr(op, os.NewSyscallError("getsockname", err)) + } + conn.local.fromRaw(&sal) + + // one last check for timeout, since asyncIO doesn't check the context + if err = ctx.Err(); err != nil { + return nil, conn.opErr(op, err) } + + conn.sock = sock sock = nil + return conn, nil } -*/ + +// redialWait waits before attempting to redial, resetting the timer as appropriate. +func (d *HvsockDialer) redialWait(ctx context.Context) (err error) { + if d.RetryWait == 0 { + return nil + } + + if d.rt == nil { + d.rt = time.NewTimer(d.RetryWait) + } else { + // should already be stopped and drained + d.rt.Reset(d.RetryWait) + } + + select { + case <-ctx.Done(): + case <-d.rt.C: + return nil + } + + // stop and drain the timer + if !d.rt.Stop() { + <-d.rt.C + } + return ctx.Err() +} + +// assumes error is a plain, unwrapped syscall.Errno provided by direct syscall. 
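A sketch of the HvsockDialer added above, retrying the transient connect failures (refused, unreachable, timed out) it is designed to absorb; the loopback target and port 5000 are placeholders chosen for illustration:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/Microsoft/go-winio"
	"github.com/Microsoft/go-winio/pkg/guid"
)

// dialPartition connects to the given partition on a vsock-style port.
func dialPartition(vmID guid.GUID, port uint32) (*winio.HvsockConn, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	d := &winio.HvsockDialer{
		Retries:   3,           // retry refused/unreachable/timed-out connects
		RetryWait: time.Second, // pause between attempts
	}
	return d.Dial(ctx, &winio.HvsockAddr{
		VMID:      vmID,
		ServiceID: winio.VsockServiceID(port),
	})
}

func main() {
	conn, err := dialPartition(winio.HvsockGUIDLoopback(), 5000) // placeholder target
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```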
+func canRedial(err error) bool { + //nolint:errorlint // guaranteed to be an Errno + switch err { + case windows.WSAECONNREFUSED, windows.WSAENETUNREACH, windows.WSAETIMEDOUT, + windows.ERROR_CONNECTION_REFUSED, windows.ERROR_CONNECTION_UNAVAIL: + return true + default: + return false + } +} func (conn *HvsockConn) opErr(op string, err error) error { + // translate from "file closed" to "socket closed" + if errors.Is(err, ErrFileClosed) { + err = socket.ErrSocketClosed + } return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err} } func (conn *HvsockConn) Read(b []byte) (int, error) { - c, err := conn.sock.prepareIo() + c, err := conn.sock.prepareIO() if err != nil { return 0, conn.opErr("read", err) } @@ -204,10 +450,11 @@ func (conn *HvsockConn) Read(b []byte) (int, error) { buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} var flags, bytes uint32 err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) - n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err) + n, err := conn.sock.asyncIO(c, &conn.sock.readDeadline, bytes, err) if err != nil { - if _, ok := err.(syscall.Errno); ok { - err = os.NewSyscallError("wsarecv", err) + var eno windows.Errno + if errors.As(err, &eno) { + err = os.NewSyscallError("wsarecv", eno) } return 0, conn.opErr("read", err) } else if n == 0 { @@ -230,7 +477,7 @@ func (conn *HvsockConn) Write(b []byte) (int, error) { } func (conn *HvsockConn) write(b []byte) (int, error) { - c, err := conn.sock.prepareIo() + c, err := conn.sock.prepareIO() if err != nil { return 0, conn.opErr("write", err) } @@ -238,10 +485,11 @@ func (conn *HvsockConn) write(b []byte) (int, error) { buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} var bytes uint32 err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) - n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err) + n, err := conn.sock.asyncIO(c, &conn.sock.writeDeadline, bytes, err) if err != nil { - if _, ok := err.(syscall.Errno); ok { - err = os.NewSyscallError("wsasend", err) + var eno windows.Errno + if errors.As(err, &eno) { + err = os.NewSyscallError("wsasend", eno) } return 0, conn.opErr("write", err) } @@ -257,13 +505,19 @@ func (conn *HvsockConn) IsClosed() bool { return conn.sock.IsClosed() } +// shutdown disables sending or receiving on a socket. func (conn *HvsockConn) shutdown(how int) error { if conn.IsClosed() { - return ErrFileClosed + return socket.ErrSocketClosed } err := syscall.Shutdown(conn.sock.handle, how) if err != nil { + // If the connection was closed, shutdowns fail with "not connected" + if errors.Is(err, windows.WSAENOTCONN) || + errors.Is(err, windows.WSAESHUTDOWN) { + err = socket.ErrSocketClosed + } return os.NewSyscallError("shutdown", err) } return nil @@ -273,7 +527,7 @@ func (conn *HvsockConn) shutdown(how int) error { func (conn *HvsockConn) CloseRead() error { err := conn.shutdown(syscall.SHUT_RD) if err != nil { - return conn.opErr("close", err) + return conn.opErr("closeread", err) } return nil } @@ -283,7 +537,7 @@ func (conn *HvsockConn) CloseRead() error { func (conn *HvsockConn) CloseWrite() error { err := conn.shutdown(syscall.SHUT_WR) if err != nil { - return conn.opErr("close", err) + return conn.opErr("closewrite", err) } return nil } @@ -300,8 +554,13 @@ func (conn *HvsockConn) RemoteAddr() net.Addr { // SetDeadline implements the net.Conn SetDeadline method. 
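CloseRead and CloseWrite above give HvsockConn TCP-style half-close semantics, and opErr now reports "closeread"/"closewrite" rather than a generic "close". A sketch of the usual request/response pattern built on that; the function and its arguments are illustrative only:

```go
package hvsockexample

import (
	"io"

	"github.com/Microsoft/go-winio"
)

// sendRequest writes a request, half-closes the write side so the peer sees
// EOF, then reads the full response until the peer closes its end.
func sendRequest(conn *winio.HvsockConn, req []byte) ([]byte, error) {
	if _, err := conn.Write(req); err != nil {
		return nil, err
	}
	if err := conn.CloseWrite(); err != nil { // the peer's next Read returns io.EOF
		return nil, err
	}
	return io.ReadAll(conn)
}
```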
func (conn *HvsockConn) SetDeadline(t time.Time) error { - conn.SetReadDeadline(t) - conn.SetWriteDeadline(t) + // todo: implement `SetDeadline` for `win32File` + if err := conn.SetReadDeadline(t); err != nil { + return fmt.Errorf("set read deadline: %w", err) + } + if err := conn.SetWriteDeadline(t); err != nil { + return fmt.Errorf("set write deadline: %w", err) + } return nil } diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go b/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go new file mode 100644 index 0000000000000..7e82f9afa9525 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/socket/rawaddr.go @@ -0,0 +1,20 @@ +package socket + +import ( + "unsafe" +) + +// RawSockaddr allows structs to be used with [Bind] and [ConnectEx]. The +// struct must meet the Win32 sockaddr requirements specified here: +// https://docs.microsoft.com/en-us/windows/win32/winsock/sockaddr-2 +// +// Specifically, the struct size must be least larger than an int16 (unsigned short) +// for the address family. +type RawSockaddr interface { + // Sockaddr returns a pointer to the RawSockaddr and its struct size, allowing + // for the RawSockaddr's data to be overwritten by syscalls (if necessary). + // + // It is the callers responsibility to validate that the values are valid; invalid + // pointers or size can cause a panic. + Sockaddr() (unsafe.Pointer, int32, error) +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go new file mode 100644 index 0000000000000..39e8c05f8f3fa --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/internal/socket/socket.go @@ -0,0 +1,179 @@ +//go:build windows + +package socket + +import ( + "errors" + "fmt" + "net" + "sync" + "syscall" + "unsafe" + + "github.com/Microsoft/go-winio/pkg/guid" + "golang.org/x/sys/windows" +) + +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go socket.go + +//sys getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getsockname +//sys getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) [failretval==socketError] = ws2_32.getpeername +//sys bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind + +const socketError = uintptr(^uint32(0)) + +var ( + // todo(helsaawy): create custom error types to store the desired vs actual size and addr family? + + ErrBufferSize = errors.New("buffer size") + ErrAddrFamily = errors.New("address family") + ErrInvalidPointer = errors.New("invalid pointer") + ErrSocketClosed = fmt.Errorf("socket closed: %w", net.ErrClosed) +) + +// todo(helsaawy): replace these with generics, ie: GetSockName[S RawSockaddr](s windows.Handle) (S, error) + +// GetSockName writes the local address of socket s to the [RawSockaddr] rsa. +// If rsa is not large enough, the [windows.WSAEFAULT] is returned. +func GetSockName(s windows.Handle, rsa RawSockaddr) error { + ptr, l, err := rsa.Sockaddr() + if err != nil { + return fmt.Errorf("could not retrieve socket pointer and size: %w", err) + } + + // although getsockname returns WSAEFAULT if the buffer is too small, it does not set + // &l to the correct size, so--apart from doubling the buffer repeatedly--there is no remedy + return getsockname(s, ptr, &l) +} + +// GetPeerName returns the remote address the socket is connected to. 
+// +// See [GetSockName] for more information. +func GetPeerName(s windows.Handle, rsa RawSockaddr) error { + ptr, l, err := rsa.Sockaddr() + if err != nil { + return fmt.Errorf("could not retrieve socket pointer and size: %w", err) + } + + return getpeername(s, ptr, &l) +} + +func Bind(s windows.Handle, rsa RawSockaddr) (err error) { + ptr, l, err := rsa.Sockaddr() + if err != nil { + return fmt.Errorf("could not retrieve socket pointer and size: %w", err) + } + + return bind(s, ptr, l) +} + +// "golang.org/x/sys/windows".ConnectEx and .Bind only accept internal implementations of the +// their sockaddr interface, so they cannot be used with HvsockAddr +// Replicate functionality here from +// https://cs.opensource.google/go/x/sys/+/master:windows/syscall_windows.go + +// The function pointers to `AcceptEx`, `ConnectEx` and `GetAcceptExSockaddrs` must be loaded at +// runtime via a WSAIoctl call: +// https://docs.microsoft.com/en-us/windows/win32/api/Mswsock/nc-mswsock-lpfn_connectex#remarks + +type runtimeFunc struct { + id guid.GUID + once sync.Once + addr uintptr + err error +} + +func (f *runtimeFunc) Load() error { + f.once.Do(func() { + var s windows.Handle + s, f.err = windows.Socket(windows.AF_INET, windows.SOCK_STREAM, windows.IPPROTO_TCP) + if f.err != nil { + return + } + defer windows.CloseHandle(s) //nolint:errcheck + + var n uint32 + f.err = windows.WSAIoctl(s, + windows.SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&f.id)), + uint32(unsafe.Sizeof(f.id)), + (*byte)(unsafe.Pointer(&f.addr)), + uint32(unsafe.Sizeof(f.addr)), + &n, + nil, //overlapped + 0, //completionRoutine + ) + }) + return f.err +} + +var ( + // todo: add `AcceptEx` and `GetAcceptExSockaddrs` + WSAID_CONNECTEX = guid.GUID{ //revive:disable-line:var-naming ALL_CAPS + Data1: 0x25a207b9, + Data2: 0xddf3, + Data3: 0x4660, + Data4: [8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e}, + } + + connectExFunc = runtimeFunc{id: WSAID_CONNECTEX} +) + +func ConnectEx( + fd windows.Handle, + rsa RawSockaddr, + sendBuf *byte, + sendDataLen uint32, + bytesSent *uint32, + overlapped *windows.Overlapped, +) error { + if err := connectExFunc.Load(); err != nil { + return fmt.Errorf("failed to load ConnectEx function pointer: %w", err) + } + ptr, n, err := rsa.Sockaddr() + if err != nil { + return err + } + return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped) +} + +// BOOL LpfnConnectex( +// [in] SOCKET s, +// [in] const sockaddr *name, +// [in] int namelen, +// [in, optional] PVOID lpSendBuffer, +// [in] DWORD dwSendDataLength, +// [out] LPDWORD lpdwBytesSent, +// [in] LPOVERLAPPED lpOverlapped +// ) + +func connectEx( + s windows.Handle, + name unsafe.Pointer, + namelen int32, + sendBuf *byte, + sendDataLen uint32, + bytesSent *uint32, + overlapped *windows.Overlapped, +) (err error) { + // todo: after upgrading to 1.18, switch from syscall.Syscall9 to syscall.SyscallN + r1, _, e1 := syscall.Syscall9(connectExFunc.addr, + 7, + uintptr(s), + uintptr(name), + uintptr(namelen), + uintptr(unsafe.Pointer(sendBuf)), + uintptr(sendDataLen), + uintptr(unsafe.Pointer(bytesSent)), + uintptr(unsafe.Pointer(overlapped)), + 0, + 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return err +} diff --git a/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go new file mode 100644 index 0000000000000..6d2e1a9e44389 --- /dev/null +++ 
b/vendor/github.com/Microsoft/go-winio/internal/socket/zsyscall_windows.go @@ -0,0 +1,72 @@ +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. + +package socket + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") + + procbind = modws2_32.NewProc("bind") + procgetpeername = modws2_32.NewProc("getpeername") + procgetsockname = modws2_32.NewProc("getsockname") +) + +func bind(s windows.Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socketError { + err = errnoErr(e1) + } + return +} + +func getpeername(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) + if r1 == socketError { + err = errnoErr(e1) + } + return +} + +func getsockname(s windows.Handle, name unsafe.Pointer, namelen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(name), uintptr(unsafe.Pointer(namelen))) + if r1 == socketError { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go index 96700a73de25a..ca6e38fc0006c 100644 --- a/vendor/github.com/Microsoft/go-winio/pipe.go +++ b/vendor/github.com/Microsoft/go-winio/pipe.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winio @@ -13,6 +14,8 @@ import ( "syscall" "time" "unsafe" + + "golang.org/x/sys/windows" ) //sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe @@ -21,10 +24,10 @@ import ( //sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo //sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc -//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = ntdll.NtCreateNamedPipeFile -//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb -//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U -//sys rtlDefaultNpAcl(dacl *uintptr) 
(status ntstatus) = ntdll.RtlDefaultNpAcl +//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) = ntdll.NtCreateNamedPipeFile +//sys rtlNtStatusToDosError(status ntStatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb +//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) = ntdll.RtlDosPathNameToNtPathName_U +//sys rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) = ntdll.RtlDefaultNpAcl type ioStatusBlock struct { Status, Information uintptr @@ -51,45 +54,22 @@ type securityDescriptor struct { Control uint16 Owner uintptr Group uintptr - Sacl uintptr - Dacl uintptr + Sacl uintptr //revive:disable-line:var-naming SACL, not Sacl + Dacl uintptr //revive:disable-line:var-naming DACL, not Dacl } -type ntstatus int32 +type ntStatus int32 -func (status ntstatus) Err() error { +func (status ntStatus) Err() error { if status >= 0 { return nil } return rtlNtStatusToDosError(status) } -const ( - cERROR_PIPE_BUSY = syscall.Errno(231) - cERROR_NO_DATA = syscall.Errno(232) - cERROR_PIPE_CONNECTED = syscall.Errno(535) - cERROR_SEM_TIMEOUT = syscall.Errno(121) - - cSECURITY_SQOS_PRESENT = 0x100000 - cSECURITY_ANONYMOUS = 0 - - cPIPE_TYPE_MESSAGE = 4 - - cPIPE_READMODE_MESSAGE = 2 - - cFILE_OPEN = 1 - cFILE_CREATE = 2 - - cFILE_PIPE_MESSAGE_TYPE = 1 - cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2 - - cSE_DACL_PRESENT = 4 -) - var ( // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed. - // This error should match net.errClosing since docker takes a dependency on its text. - ErrPipeListenerClosed = errors.New("use of closed network connection") + ErrPipeListenerClosed = net.ErrClosed errPipeWriteClosed = errors.New("pipe has been closed for write") ) @@ -116,9 +96,10 @@ func (f *win32Pipe) RemoteAddr() net.Addr { } func (f *win32Pipe) SetDeadline(t time.Time) error { - f.SetReadDeadline(t) - f.SetWriteDeadline(t) - return nil + if err := f.SetReadDeadline(t); err != nil { + return err + } + return f.SetWriteDeadline(t) } // CloseWrite closes the write side of a message pipe in byte mode. @@ -157,14 +138,14 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) { return 0, io.EOF } n, err := f.win32File.Read(b) - if err == io.EOF { + if err == io.EOF { //nolint:errorlint // If this was the result of a zero-byte read, then // it is possible that the read was due to a zero-size // message. Since we are simulating CloseWrite with a // zero-byte message, ensure that all future Read() calls // also return EOF. f.readEOF = true - } else if err == syscall.ERROR_MORE_DATA { + } else if err == syscall.ERROR_MORE_DATA { //nolint:errorlint // err is Errno // ERROR_MORE_DATA indicates that the pipe's read mode is message mode // and the message still has more bytes. Treat this as a success, since // this package presents all named pipes as byte streams. @@ -173,7 +154,7 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) { return n, err } -func (s pipeAddress) Network() string { +func (pipeAddress) Network() string { return "pipe" } @@ -184,16 +165,21 @@ func (s pipeAddress) String() string { // tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. 
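With ErrPipeListenerClosed now aliased to net.ErrClosed in the hunk above, accept loops can rely on the standard sentinel to detect shutdown. A hedged sketch; the pipe name and handler are placeholders:

```go
package pipeexample

import (
	"errors"
	"log"
	"net"

	"github.com/Microsoft/go-winio"
)

func serve() error {
	l, err := winio.ListenPipe(`\\.\pipe\example`, nil) // nil config: library defaults
	if err != nil {
		return err
	}
	defer l.Close()

	for {
		conn, err := l.Accept()
		if err != nil {
			if errors.Is(err, net.ErrClosed) {
				return nil // listener closed elsewhere: normal shutdown
			}
			log.Print(err)
			continue
		}
		go func(c net.Conn) { _ = c.Close() }(conn) // placeholder handler
	}
}
```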
func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) { for { - select { case <-ctx.Done(): return syscall.Handle(0), ctx.Err() default: - h, err := createFile(*path, access, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) + h, err := createFile(*path, + access, + 0, + nil, + syscall.OPEN_EXISTING, + windows.FILE_FLAG_OVERLAPPED|windows.SECURITY_SQOS_PRESENT|windows.SECURITY_ANONYMOUS, + 0) if err == nil { return h, nil } - if err != cERROR_PIPE_BUSY { + if err != windows.ERROR_PIPE_BUSY { //nolint:errorlint // err is Errno return h, &os.PathError{Err: err, Op: "open", Path: *path} } // Wait 10 msec and try again. This is a rather simplistic @@ -213,9 +199,10 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { } else { absTimeout = time.Now().Add(2 * time.Second) } - ctx, _ := context.WithDeadline(context.Background(), absTimeout) + ctx, cancel := context.WithDeadline(context.Background(), absTimeout) + defer cancel() conn, err := DialPipeContext(ctx, path) - if err == context.DeadlineExceeded { + if errors.Is(err, context.DeadlineExceeded) { return nil, ErrTimeout } return conn, err @@ -251,7 +238,7 @@ func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, // If the pipe is in message mode, return a message byte pipe, which // supports CloseWrite(). - if flags&cPIPE_TYPE_MESSAGE != 0 { + if flags&windows.PIPE_TYPE_MESSAGE != 0 { return &win32MessageBytePipe{ win32Pipe: win32Pipe{win32File: f, path: path}, }, nil @@ -283,7 +270,11 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy oa.Length = unsafe.Sizeof(oa) var ntPath unicodeString - if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil { + if err := rtlDosPathNameToNtPathName(&path16[0], + &ntPath, + 0, + 0, + ).Err(); err != nil { return 0, &os.PathError{Op: "open", Path: path, Err: err} } defer localFree(ntPath.Buffer) @@ -292,8 +283,8 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy // The security descriptor is only needed for the first pipe. if first { if sd != nil { - len := uint32(len(sd)) - sdb := localAlloc(0, len) + l := uint32(len(sd)) + sdb := localAlloc(0, l) defer localFree(sdb) copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd) oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb)) @@ -301,28 +292,28 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy // Construct the default named pipe security descriptor. 
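DialPipe above now cancels its internal context instead of leaking the CancelFunc. Callers that want their own timeout can call DialPipeContext directly; a short sketch with a placeholder pipe name:

```go
package pipeexample

import (
	"context"
	"net"
	"time"

	"github.com/Microsoft/go-winio"
)

// dialWithTimeout dials a named pipe, e.g. `\\.\pipe\example`, giving up after
// the supplied timeout and always releasing the context, mirroring the fix above.
func dialWithTimeout(path string, timeout time.Duration) (net.Conn, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	return winio.DialPipeContext(ctx, path)
}
```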
var dacl uintptr if err := rtlDefaultNpAcl(&dacl).Err(); err != nil { - return 0, fmt.Errorf("getting default named pipe ACL: %s", err) + return 0, fmt.Errorf("getting default named pipe ACL: %w", err) } defer localFree(dacl) sdb := &securityDescriptor{ Revision: 1, - Control: cSE_DACL_PRESENT, + Control: windows.SE_DACL_PRESENT, Dacl: dacl, } oa.SecurityDescriptor = sdb } } - typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS) + typ := uint32(windows.FILE_PIPE_REJECT_REMOTE_CLIENTS) if c.MessageMode { - typ |= cFILE_PIPE_MESSAGE_TYPE + typ |= windows.FILE_PIPE_MESSAGE_TYPE } - disposition := uint32(cFILE_OPEN) + disposition := uint32(windows.FILE_OPEN) access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE) if first { - disposition = cFILE_CREATE + disposition = windows.FILE_CREATE // By not asking for read or write access, the named pipe file system // will put this pipe into an initially disconnected state, blocking // client connections until the next call with first == false. @@ -335,7 +326,20 @@ func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (sy h syscall.Handle iosb ioStatusBlock ) - err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err() + err = ntCreateNamedPipeFile(&h, + access, + &oa, + &iosb, + syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, + disposition, + 0, + typ, + 0, + 0, + 0xffffffff, + uint32(c.InputBufferSize), + uint32(c.OutputBufferSize), + &timeout).Err() if err != nil { return 0, &os.PathError{Op: "open", Path: path, Err: err} } @@ -380,7 +384,7 @@ func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) { p.Close() p = nil err = <-ch - if err == nil || err == ErrFileClosed { + if err == nil || err == ErrFileClosed { //nolint:errorlint // err is Errno err = ErrPipeListenerClosed } } @@ -402,12 +406,12 @@ func (l *win32PipeListener) listenerRoutine() { p, err = l.makeConnectedServerPipe() // If the connection was immediately closed by the client, try // again. - if err != cERROR_NO_DATA { + if err != windows.ERROR_NO_DATA { //nolint:errorlint // err is Errno break } } responseCh <- acceptResponse{p, err} - closed = err == ErrPipeListenerClosed + closed = err == ErrPipeListenerClosed //nolint:errorlint // err is Errno } } syscall.Close(l.firstHandle) @@ -469,15 +473,15 @@ func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { } func connectPipe(p *win32File) error { - c, err := p.prepareIo() + c, err := p.prepareIO() if err != nil { return err } defer p.wg.Done() err = connectNamedPipe(p.handle, &c.o) - _, err = p.asyncIo(c, nil, 0, err) - if err != nil && err != cERROR_PIPE_CONNECTED { + _, err = p.asyncIO(c, nil, 0, err) + if err != nil && err != windows.ERROR_PIPE_CONNECTED { //nolint:errorlint // err is Errno return err } return nil diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/doc.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/doc.go new file mode 100644 index 0000000000000..888def77669e1 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/etw/doc.go @@ -0,0 +1,8 @@ +// Package etw provides support for TraceLogging-based ETW (Event Tracing +// for Windows). TraceLogging is a format of ETW events that are self-describing +// (the event contains information on its own schema). This allows them to be +// decoded without needing a separate manifest with event information. 
The +// implementation here is based on the information found in +// TraceLoggingProvider.h in the Windows SDK, which implements TraceLogging as a +// set of C macros. +package etw diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/eventdata.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/eventdata.go index abf16803ee40d..a63547549434d 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/eventdata.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/etw/eventdata.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package etw @@ -14,60 +15,60 @@ type eventData struct { buffer bytes.Buffer } -// bytes returns the raw binary data containing the event data. The returned +// toBytes returns the raw binary data containing the event data. The returned // value is not copied from the internal buffer, so it can be mutated by the // eventData object after it is returned. -func (ed *eventData) bytes() []byte { +func (ed *eventData) toBytes() []byte { return ed.buffer.Bytes() } // writeString appends a string, including the null terminator, to the buffer. func (ed *eventData) writeString(data string) { - ed.buffer.WriteString(data) - ed.buffer.WriteByte(0) + _, _ = ed.buffer.WriteString(data) + _ = ed.buffer.WriteByte(0) } // writeInt8 appends a int8 to the buffer. func (ed *eventData) writeInt8(value int8) { - ed.buffer.WriteByte(uint8(value)) + _ = ed.buffer.WriteByte(uint8(value)) } // writeInt16 appends a int16 to the buffer. func (ed *eventData) writeInt16(value int16) { - binary.Write(&ed.buffer, binary.LittleEndian, value) + _ = binary.Write(&ed.buffer, binary.LittleEndian, value) } // writeInt32 appends a int32 to the buffer. func (ed *eventData) writeInt32(value int32) { - binary.Write(&ed.buffer, binary.LittleEndian, value) + _ = binary.Write(&ed.buffer, binary.LittleEndian, value) } // writeInt64 appends a int64 to the buffer. func (ed *eventData) writeInt64(value int64) { - binary.Write(&ed.buffer, binary.LittleEndian, value) + _ = binary.Write(&ed.buffer, binary.LittleEndian, value) } // writeUint8 appends a uint8 to the buffer. func (ed *eventData) writeUint8(value uint8) { - ed.buffer.WriteByte(value) + _ = ed.buffer.WriteByte(value) } // writeUint16 appends a uint16 to the buffer. func (ed *eventData) writeUint16(value uint16) { - binary.Write(&ed.buffer, binary.LittleEndian, value) + _ = binary.Write(&ed.buffer, binary.LittleEndian, value) } // writeUint32 appends a uint32 to the buffer. func (ed *eventData) writeUint32(value uint32) { - binary.Write(&ed.buffer, binary.LittleEndian, value) + _ = binary.Write(&ed.buffer, binary.LittleEndian, value) } // writeUint64 appends a uint64 to the buffer. func (ed *eventData) writeUint64(value uint64) { - binary.Write(&ed.buffer, binary.LittleEndian, value) + _ = binary.Write(&ed.buffer, binary.LittleEndian, value) } // writeFiletime appends a FILETIME to the buffer. 
func (ed *eventData) writeFiletime(value syscall.Filetime) { - binary.Write(&ed.buffer, binary.LittleEndian, value) + _ = binary.Write(&ed.buffer, binary.LittleEndian, value) } diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/eventdatadescriptor.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/eventdatadescriptor.go index 8b0ad4816254f..9cbef490069d1 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/eventdatadescriptor.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/etw/eventdatadescriptor.go @@ -1,3 +1,5 @@ +//go:build windows + package etw import ( @@ -13,11 +15,11 @@ const ( ) type eventDataDescriptor struct { - ptr ptr64 - size uint32 - dataType eventDataDescriptorType - reserved1 uint8 - reserved2 uint16 + ptr ptr64 + size uint32 + dataType eventDataDescriptorType + _ uint8 + _ uint16 } func newEventDataDescriptor(dataType eventDataDescriptorType, buffer []byte) eventDataDescriptor { diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/eventdescriptor.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/eventdescriptor.go index cc41f15999c3c..0dd11b458d7d3 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/eventdescriptor.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/etw/eventdescriptor.go @@ -1,3 +1,5 @@ +//go:build windows + package etw // Channel represents the ETW logging channel that is used. It can be used by @@ -45,6 +47,8 @@ const ( ) // EventDescriptor represents various metadata for an ETW event. +// +//nolint:structcheck // task is currently unused type eventDescriptor struct { id uint16 version uint8 @@ -70,6 +74,8 @@ func newEventDescriptor() *eventDescriptor { // should uniquely identify the other event metadata (contained in // EventDescriptor, and field metadata). Only the lower 24 bits of this value // are relevant. +// +//nolint:unused // keep for future use func (ed *eventDescriptor) identity() uint32 { return (uint32(ed.version) << 16) | uint32(ed.id) } @@ -78,6 +84,8 @@ func (ed *eventDescriptor) identity() uint32 { // should uniquely identify the other event metadata (contained in // EventDescriptor, and field metadata). Only the lower 24 bits of this value // are relevant. +// +//nolint:unused // keep for future use func (ed *eventDescriptor) setIdentity(identity uint32) { ed.id = uint16(identity) ed.version = uint8(identity >> 16) diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/eventmetadata.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/eventmetadata.go index 6fdc126cc90e9..a2e151a5020fc 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/eventmetadata.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/etw/eventmetadata.go @@ -1,3 +1,5 @@ +//go:build windows + package etw import ( @@ -10,6 +12,8 @@ type inType byte // Various inType definitions for TraceLogging. These must match the definitions // found in TraceLoggingProvider.h in the Windows SDK. +// +//nolint:deadcode,varcheck // keep unused constants for potential future use const ( inTypeNull inType = iota inTypeUnicodeString @@ -47,6 +51,8 @@ type outType byte // Various outType definitions for TraceLogging. These must match the // definitions found in TraceLoggingProvider.h in the Windows SDK. +// +//nolint:deadcode,varcheck // keep unused constants for potential future use const ( // outTypeDefault indicates that the default formatting for the inType will // be used by the event decoder. @@ -81,11 +87,11 @@ type eventMetadata struct { buffer bytes.Buffer } -// bytes returns the raw binary data containing the event metadata. 
Before being +// toBytes returns the raw binary data containing the event metadata. Before being // returned, the current size of the buffer is written to the start of the // buffer. The returned value is not copied from the internal buffer, so it can // be mutated by the eventMetadata object after it is returned. -func (em *eventMetadata) bytes() []byte { +func (em *eventMetadata) toBytes() []byte { // Finalize the event metadata buffer by filling in the buffer length at the // beginning. binary.LittleEndian.PutUint16(em.buffer.Bytes(), uint16(em.buffer.Len())) @@ -95,7 +101,7 @@ func (em *eventMetadata) bytes() []byte { // writeEventHeader writes the metadata for the start of an event to the buffer. // This specifies the event name and tags. func (em *eventMetadata) writeEventHeader(name string, tags uint32) { - binary.Write(&em.buffer, binary.LittleEndian, uint16(0)) // Length placeholder + _ = binary.Write(&em.buffer, binary.LittleEndian, uint16(0)) // Length placeholder em.writeTags(tags) em.buffer.WriteString(name) em.buffer.WriteByte(0) // Null terminator for name @@ -118,7 +124,7 @@ func (em *eventMetadata) writeFieldInner(name string, inType inType, outType out } if arrSize != 0 { - binary.Write(&em.buffer, binary.LittleEndian, arrSize) + _ = binary.Write(&em.buffer, binary.LittleEndian, arrSize) } } @@ -151,13 +157,17 @@ func (em *eventMetadata) writeTags(tags uint32) { } // writeField writes the metadata for a simple field to the buffer. +// +//nolint:unparam // tags is currently always 0, may change in the future func (em *eventMetadata) writeField(name string, inType inType, outType outType, tags uint32) { em.writeFieldInner(name, inType, outType, tags, 0) } // writeArray writes the metadata for an array field to the buffer. The number // of elements in the array must be written as a uint16 in the event data, -// immediately preceeding the event data. +// immediately preceding the event data. +// +//nolint:unparam // tags is currently always 0, may change in the future func (em *eventMetadata) writeArray(name string, inType inType, outType outType, tags uint32) { em.writeFieldInner(name, inType|inTypeArray, outType, tags, 0) } @@ -165,6 +175,8 @@ func (em *eventMetadata) writeArray(name string, inType inType, outType outType, // writeCountedArray writes the metadata for an array field to the buffer. The // size of a counted array is fixed, and the size is written into the metadata // directly. 
+// +//nolint:unused // keep for future use func (em *eventMetadata) writeCountedArray(name string, count uint16, inType inType, outType outType, tags uint32) { em.writeFieldInner(name, inType|inTypeCountedArray, outType, tags, count) } diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/eventopt.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/eventopt.go index eaace6886edc9..73403220c9fb8 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/eventopt.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/etw/eventopt.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package etw diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/fieldopt.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/fieldopt.go index b5ea80a4607aa..b769c89629828 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/fieldopt.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/etw/fieldopt.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package etw @@ -481,7 +482,7 @@ func SmartField(name string, v interface{}) FieldOpt { case reflect.Int32: return SmartField(name, int32(rv.Int())) case reflect.Int64: - return SmartField(name, int64(rv.Int())) + return SmartField(name, int64(rv.Int())) //nolint:unconvert // make look consistent case reflect.Uint: return SmartField(name, uint(rv.Uint())) case reflect.Uint8: @@ -491,13 +492,13 @@ func SmartField(name string, v interface{}) FieldOpt { case reflect.Uint32: return SmartField(name, uint32(rv.Uint())) case reflect.Uint64: - return SmartField(name, uint64(rv.Uint())) + return SmartField(name, uint64(rv.Uint())) //nolint:unconvert // make look consistent case reflect.Uintptr: return SmartField(name, uintptr(rv.Uint())) case reflect.Float32: return SmartField(name, float32(rv.Float())) case reflect.Float64: - return SmartField(name, float64(rv.Float())) + return SmartField(name, float64(rv.Float())) //nolint:unconvert // make look consistent case reflect.String: return SmartField(name, rv.String()) case reflect.Struct: @@ -509,6 +510,9 @@ func SmartField(name string, v interface{}) FieldOpt { } } return Struct(name, fields...) 
+ case reflect.Array, reflect.Chan, reflect.Complex128, reflect.Complex64, + reflect.Func, reflect.Interface, reflect.Invalid, reflect.Map, reflect.Ptr, + reflect.Slice, reflect.UnsafePointer: } } diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider.go index 581ef595a5f97..3669b4f783a80 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider.go @@ -1,3 +1,4 @@ +//go:build windows && (amd64 || arm64 || 386) // +build windows // +build amd64 arm64 386 @@ -45,18 +46,18 @@ func NewProviderWithOptions(name string, options ...ProviderOpt) (provider *Prov trait := &bytes.Buffer{} if opts.group != (guid.GUID{}) { - binary.Write(trait, binary.LittleEndian, uint16(0)) // Write empty size for buffer (update later) - binary.Write(trait, binary.LittleEndian, uint8(1)) // EtwProviderTraitTypeGroup - traitArray := opts.group.ToWindowsArray() // Append group guid + _ = binary.Write(trait, binary.LittleEndian, uint16(0)) // Write empty size for buffer (update later) + _ = binary.Write(trait, binary.LittleEndian, uint8(1)) // EtwProviderTraitTypeGroup + traitArray := opts.group.ToWindowsArray() // Append group guid trait.Write(traitArray[:]) binary.LittleEndian.PutUint16(trait.Bytes(), uint16(trait.Len())) // Update size } metadata := &bytes.Buffer{} - binary.Write(metadata, binary.LittleEndian, uint16(0)) // Write empty size for buffer (to update later) + _ = binary.Write(metadata, binary.LittleEndian, uint16(0)) // Write empty size for buffer (to update later) metadata.WriteString(name) metadata.WriteByte(0) // Null terminator for name - trait.WriteTo(metadata) // Add traits if applicable + _, _ = trait.WriteTo(metadata) // Add traits if applicable binary.LittleEndian.PutUint16(metadata.Bytes(), uint16(metadata.Len())) // Update the size at the beginning of the buffer provider.metadata = metadata.Bytes() @@ -64,8 +65,8 @@ func NewProviderWithOptions(name string, options ...ProviderOpt) (provider *Prov provider.handle, eventInfoClassProviderSetTraits, uintptr(unsafe.Pointer(&provider.metadata[0])), - uint32(len(provider.metadata))); err != nil { - + uint32(len(provider.metadata)), + ); err != nil { return nil, err } diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider_unsupported.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider_unsupported.go index 5a05c13425421..e0057cfe0d078 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider_unsupported.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/etw/newprovider_unsupported.go @@ -1,5 +1,5 @@ -// +build windows -// +build arm +//go:build windows && arm +// +build windows,arm package etw diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/provider.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/provider.go index a5b90d037ddee..8174bff1b0870 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/provider.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/etw/provider.go @@ -1,9 +1,10 @@ +//go:build windows // +build windows package etw import ( - "crypto/sha1" + "crypto/sha1" //nolint:gosec // not used for secure application "encoding/binary" "strings" "unicode/utf16" @@ -27,7 +28,7 @@ type Provider struct { keywordAll uint64 } -// String returns the `provider`.ID as a string +// String returns the `provider`.ID as a string. 
func (provider *Provider) String() string { if provider == nil { return "" @@ -54,6 +55,7 @@ const ( type eventInfoClass uint32 +//nolint:deadcode,varcheck // keep unused constants for potential future use const ( eventInfoClassProviderBinaryTrackInfo eventInfoClass = iota eventInfoClassProviderSetReserved1 @@ -65,10 +67,19 @@ const ( // enable/disable notifications from ETW. type EnableCallback func(guid.GUID, ProviderState, Level, uint64, uint64, uintptr) -func providerCallback(sourceID guid.GUID, state ProviderState, level Level, matchAnyKeyword uint64, matchAllKeyword uint64, filterData uintptr, i uintptr) { +func providerCallback( + sourceID guid.GUID, + state ProviderState, + level Level, + matchAnyKeyword uint64, + matchAllKeyword uint64, + filterData uintptr, + i uintptr, +) { provider := providers.getProvider(uint(i)) switch state { + case ProviderStateCaptureState: case ProviderStateDisable: provider.enabled = false case ProviderStateEnable: @@ -90,17 +101,22 @@ func providerCallback(sourceID guid.GUID, state ProviderState, level Level, matc // // The algorithm is roughly the RFC 4122 algorithm for a V5 UUID, but differs in // the following ways: -// - The input name is first upper-cased, UTF16-encoded, and converted to -// big-endian. -// - No variant is set on the result UUID. -// - The result UUID is treated as being in little-endian format, rather than -// big-endian. +// - The input name is first upper-cased, UTF16-encoded, and converted to +// big-endian. +// - No variant is set on the result UUID. +// - The result UUID is treated as being in little-endian format, rather than +// big-endian. func providerIDFromName(name string) guid.GUID { - buffer := sha1.New() - namespace := guid.GUID{0x482C2DB2, 0xC390, 0x47C8, [8]byte{0x87, 0xF8, 0x1A, 0x15, 0xBF, 0xC1, 0x30, 0xFB}} + buffer := sha1.New() //nolint:gosec // not used for secure application + namespace := guid.GUID{ + Data1: 0x482C2DB2, + Data2: 0xC390, + Data3: 0x47C8, + Data4: [8]byte{0x87, 0xF8, 0x1A, 0x15, 0xBF, 0xC1, 0x30, 0xFB}, + } namespaceBytes := namespace.ToArray() buffer.Write(namespaceBytes[:]) - binary.Write(buffer, binary.BigEndian, utf16.Encode([]rune(strings.ToUpper(name)))) + _ = binary.Write(buffer, binary.BigEndian, utf16.Encode([]rune(strings.ToUpper(name)))) sum := buffer.Sum(nil) sum[7] = (sum[7] & 0xf) | 0x50 @@ -117,25 +133,24 @@ type providerOpts struct { } // ProviderOpt allows the caller to specify provider options to -// NewProviderWithOptions +// NewProviderWithOptions. type ProviderOpt func(*providerOpts) -// WithCallback is used to provide a callback option to NewProviderWithOptions +// WithCallback is used to provide a callback option to NewProviderWithOptions. func WithCallback(callback EnableCallback) ProviderOpt { return func(opts *providerOpts) { opts.callback = callback } } -// WithID is used to provide a provider ID option to NewProviderWithOptions +// WithID is used to provide a provider ID option to NewProviderWithOptions. func WithID(id guid.GUID) ProviderOpt { return func(opts *providerOpts) { opts.id = id } } -// WithGroup is used to provide a provider group option to -// NewProviderWithOptions +// WithGroup is used to provide a provider group option to NewProviderWithOptions. func WithGroup(group guid.GUID) ProviderOpt { return func(opts *providerOpts) { opts.group = group @@ -237,11 +252,17 @@ func (provider *Provider) WriteEvent(name string, eventOpts []EventOpt, fieldOpt // event metadata (e.g. for the name) so we don't need to do this check for // the metadata. 
dataBlobs := [][]byte{} - if len(ed.bytes()) > 0 { - dataBlobs = [][]byte{ed.bytes()} + if len(ed.toBytes()) > 0 { + dataBlobs = [][]byte{ed.toBytes()} } - return provider.writeEventRaw(options.descriptor, options.activityID, options.relatedActivityID, [][]byte{em.bytes()}, dataBlobs) + return provider.writeEventRaw( + options.descriptor, + options.activityID, + options.relatedActivityID, + [][]byte{em.toBytes()}, + dataBlobs, + ) } // writeEventRaw writes a single ETW event from the provider. This function is @@ -257,17 +278,24 @@ func (provider *Provider) writeEventRaw( relatedActivityID guid.GUID, metadataBlobs [][]byte, dataBlobs [][]byte) error { - dataDescriptorCount := uint32(1 + len(metadataBlobs) + len(dataBlobs)) dataDescriptors := make([]eventDataDescriptor, 0, dataDescriptorCount) - dataDescriptors = append(dataDescriptors, newEventDataDescriptor(eventDataDescriptorTypeProviderMetadata, provider.metadata)) + dataDescriptors = append(dataDescriptors, + newEventDataDescriptor(eventDataDescriptorTypeProviderMetadata, provider.metadata)) for _, blob := range metadataBlobs { - dataDescriptors = append(dataDescriptors, newEventDataDescriptor(eventDataDescriptorTypeEventMetadata, blob)) + dataDescriptors = append(dataDescriptors, + newEventDataDescriptor(eventDataDescriptorTypeEventMetadata, blob)) } for _, blob := range dataBlobs { - dataDescriptors = append(dataDescriptors, newEventDataDescriptor(eventDataDescriptorTypeUserData, blob)) + dataDescriptors = append(dataDescriptors, + newEventDataDescriptor(eventDataDescriptorTypeUserData, blob)) } - return eventWriteTransfer(provider.handle, descriptor, (*windows.GUID)(&activityID), (*windows.GUID)(&relatedActivityID), dataDescriptorCount, &dataDescriptors[0]) + return eventWriteTransfer(provider.handle, + descriptor, + (*windows.GUID)(&activityID), + (*windows.GUID)(&relatedActivityID), + dataDescriptorCount, + &dataDescriptors[0]) } diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/providerglobal.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/providerglobal.go index ce3d305762ab5..0a1d90dda0283 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/providerglobal.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/etw/providerglobal.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package etw @@ -14,7 +15,6 @@ type providerMap struct { m map[uint]*Provider i uint lock sync.Mutex - once sync.Once } var providers = providerMap{ @@ -50,5 +50,7 @@ func (p *providerMap) getProvider(index uint) *Provider { return p.m[index] } +//todo: combine these into struct, so that "globalProviderCallback" is guaranteed to be initialized through method access + var providerCallbackOnce sync.Once var globalProviderCallback uintptr diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/ptr64_32.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/ptr64_32.go index d1a76125d7e66..26c9f1948af37 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/ptr64_32.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/etw/ptr64_32.go @@ -1,3 +1,5 @@ +//go:build windows && (386 || arm) +// +build windows // +build 386 arm package etw diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/ptr64_64.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/ptr64_64.go index b86c8f2bd8822..1524c643fd29d 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/ptr64_64.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/etw/ptr64_64.go @@ -1,3 +1,5 @@ +//go:build windows && (amd64 || arm64) +// +build windows // +build amd64 arm64 package etw diff --git 
a/vendor/github.com/Microsoft/go-winio/pkg/etw/etw.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/syscall.go similarity index 70% rename from vendor/github.com/Microsoft/go-winio/pkg/etw/etw.go rename to vendor/github.com/Microsoft/go-winio/pkg/etw/syscall.go index 10cd08d84c2b9..16f3bb13e5963 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/etw.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/etw/syscall.go @@ -1,13 +1,8 @@ -// Package etw provides support for TraceLogging-based ETW (Event Tracing -// for Windows). TraceLogging is a format of ETW events that are self-describing -// (the event contains information on its own schema). This allows them to be -// decoded without needing a separate manifest with event information. The -// implementation here is based on the information found in -// TraceLoggingProvider.h in the Windows SDK, which implements TraceLogging as a -// set of C macros. +//go:build windows + package etw -//go:generate go run mksyscall_windows.go -output zsyscall_windows.go etw.go +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go syscall.go //sys eventRegister(providerId *windows.GUID, callback uintptr, callbackContext uintptr, providerHandle *providerHandle) (win32err error) = advapi32.EventRegister diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_32.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_32.go index 6867a1f87843e..14c49984208cf 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_32.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_32.go @@ -1,3 +1,4 @@ +//go:build windows && (386 || arm) // +build windows // +build 386 arm diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_64.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_64.go index fe83df2bf0ec0..8cfe2e8cab6c2 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_64.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/etw/wrapper_64.go @@ -1,3 +1,4 @@ +//go:build windows && (amd64 || arm64) // +build windows // +build amd64 arm64 @@ -19,7 +20,6 @@ func eventWriteTransfer( relatedActivityID *windows.GUID, dataDescriptorCount uint32, dataDescriptors *eventDataDescriptor) (win32err error) { - return eventWriteTransfer_64( providerHandle, descriptor, @@ -34,7 +34,6 @@ func eventSetInformation( class eventInfoClass, information uintptr, length uint32) (win32err error) { - return eventSetInformation_64( providerHandle, class, @@ -46,7 +45,21 @@ func eventSetInformation( // for provider notifications. Because Go has trouble with callback arguments of // different size, it has only pointer-sized arguments, which are then cast to // the appropriate types when calling providerCallback. 
-func providerCallbackAdapter(sourceID *guid.GUID, state uintptr, level uintptr, matchAnyKeyword uintptr, matchAllKeyword uintptr, filterData uintptr, i uintptr) uintptr { - providerCallback(*sourceID, ProviderState(state), Level(level), uint64(matchAnyKeyword), uint64(matchAllKeyword), filterData, i) +func providerCallbackAdapter( + sourceID *guid.GUID, + state uintptr, + level uintptr, + matchAnyKeyword uintptr, + matchAllKeyword uintptr, + filterData uintptr, + i uintptr, +) uintptr { + providerCallback(*sourceID, + ProviderState(state), + Level(level), + uint64(matchAnyKeyword), + uint64(matchAllKeyword), + filterData, + i) return 0 } diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etw/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/etw/zsyscall_windows.go index 719b13d284293..c78a6ed543f42 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/etw/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/etw/zsyscall_windows.go @@ -1,4 +1,6 @@ -// Code generated by 'go generate'; DO NOT EDIT. +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. package etw diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/hook.go b/vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/hook.go index 4332af56492dd..76f6239a54dc3 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/hook.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/hook.go @@ -1,40 +1,79 @@ +//go:build windows // +build windows package etwlogrus import ( + "errors" "sort" - "github.com/Microsoft/go-winio/pkg/etw" "github.com/sirupsen/logrus" + + "github.com/Microsoft/go-winio/pkg/etw" ) +const defaultEventName = "LogrusEntry" + +// ErrNoProvider is returned when a hook is created without a provider being configured. +var ErrNoProvider = errors.New("no ETW registered provider") + +// HookOpt is an option to change the behavior of the Logrus ETW hook. +type HookOpt func(*Hook) error + // Hook is a Logrus hook which logs received events to ETW. type Hook struct { provider *etw.Provider closeProvider bool + // allows setting the entry name + getName func(*logrus.Entry) string + // returns additional options to add to the event + getEventsOpts func(*logrus.Entry) []etw.EventOpt +} + +// NewHook registers a new ETW provider and returns a hook to log from it. +// The provider will be closed when the hook is closed. +func NewHook(providerName string, opts ...HookOpt) (*Hook, error) { + opts = append(opts, WithNewETWProvider(providerName)) + + return NewHookFromOpts(opts...) +} + +// NewHookFromProvider creates a new hook based on an existing ETW provider. +// The provider will not be closed when the hook is closed. +func NewHookFromProvider(provider *etw.Provider, opts ...HookOpt) (*Hook, error) { + opts = append(opts, WithExistingETWProvider(provider)) + + return NewHookFromOpts(opts...) } -// NewHook registers a new ETW provider and returns a hook to log from it. The -// provider will be closed when the hook is closed. -func NewHook(providerName string) (*Hook, error) { - provider, err := etw.NewProvider(providerName, nil) - if err != nil { - return nil, err +// NewHookFromOpts creates a new hook with the provided options. +// An error is returned if the hook does not have a valid provider. 
+func NewHookFromOpts(opts ...HookOpt) (*Hook, error) { + h := defaultHook() + + for _, o := range opts { + if err := o(h); err != nil { + return nil, err + } } + return h, h.validate() +} - return &Hook{provider, true}, nil +func defaultHook() *Hook { + h := &Hook{} + return h } -// NewHookFromProvider creates a new hook based on an existing ETW provider. The -// provider will not be closed when the hook is closed. -func NewHookFromProvider(provider *etw.Provider) (*Hook, error) { - return &Hook{provider, false}, nil +func (h *Hook) validate() error { + if h.provider == nil { + return ErrNoProvider + } + return nil } // Levels returns the set of levels that this hook wants to receive log entries // for. -func (h *Hook) Levels() []logrus.Level { +func (*Hook) Levels() []logrus.Level { return logrus.AllLevels } @@ -58,6 +97,21 @@ func (h *Hook) Fire(e *logrus.Entry) error { return nil } + name := defaultEventName + if h.getName != nil { + if n := h.getName(e); n != "" { + name = n + } + } + + // extra room for two more options in addition to log level to avoid repeated reallocations + // if the user also provides options + opts := make([]etw.EventOpt, 0, 3) + opts = append(opts, etw.WithLevel(level)) + if h.getEventsOpts != nil { + opts = append(opts, h.getEventsOpts(e)...) + } + // Sort the fields by name so they are consistent in each instance // of an event. Otherwise, the fields don't line up in WPA. names := make([]string, 0, len(e.Data)) @@ -88,10 +142,7 @@ func (h *Hook) Fire(e *logrus.Entry) error { // as a session listening for the event having no available space in its // buffers). Therefore, we don't return the error from WriteEvent, as it is // just noise in many cases. - h.provider.WriteEvent( - "LogrusEntry", - etw.WithEventOpts(etw.WithLevel(level)), - fields) + _ = h.provider.WriteEvent(name, opts, fields) return nil } diff --git a/vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/opts.go b/vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/opts.go new file mode 100644 index 0000000000000..499fca87c43a8 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/etwlogrus/opts.go @@ -0,0 +1,53 @@ +//go:build windows + +package etwlogrus + +import ( + "github.com/sirupsen/logrus" + + "github.com/Microsoft/go-winio/pkg/etw" +) + +// etw provider + +// WithNewETWProvider registers a new ETW provider and sets the hook to log using it. +// The provider will be closed when the hook is closed. +func WithNewETWProvider(n string) HookOpt { + return func(h *Hook) error { + provider, err := etw.NewProvider(n, nil) + if err != nil { + return err + } + + h.provider = provider + h.closeProvider = true + return nil + } +} + +// WithExistingETWProvider configures the hook to use an existing ETW provider. +// The provider will not be closed when the hook is closed. +func WithExistingETWProvider(p *etw.Provider) HookOpt { + return func(h *Hook) error { + h.provider = p + h.closeProvider = false + return nil + } +} + +// WithGetName sets the ETW EventName of an event to the value returned by f +// If the name is empty, the default event name will be used. +func WithGetName(f func(*logrus.Entry) string) HookOpt { + return func(h *Hook) error { + h.getName = f + return nil + } +} + +// WithEventOpts allows additional ETW event properties (keywords, tags, etc.) to be specified. 
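Taken together, these options give the hook a small functional-options surface. A sketch of how a caller might wire it up (the provider name and the "event" field are illustrative; Close comes from the existing hook implementation):

package main

import (
	"github.com/Microsoft/go-winio/pkg/etw"
	"github.com/Microsoft/go-winio/pkg/etwlogrus"
	"github.com/sirupsen/logrus"
)

func main() {
	hook, err := etwlogrus.NewHook("Example-Service",
		// Name events after an "event" field when present; otherwise the
		// hook falls back to defaultEventName ("LogrusEntry").
		etwlogrus.WithGetName(func(e *logrus.Entry) string {
			if n, ok := e.Data["event"].(string); ok {
				return n
			}
			return ""
		}),
		// Per-entry event options (keywords, tags, ...) could be returned here.
		etwlogrus.WithEventOpts(func(*logrus.Entry) []etw.EventOpt {
			return nil
		}),
	)
	if err != nil {
		logrus.Fatal(err)
	}
	defer hook.Close() //nolint:errcheck

	logrus.AddHook(hook)
	logrus.WithField("event", "startup").Info("service started")
}

Because WithNewETWProvider is appended by NewHook, the provider is owned by the hook and released by Close; NewHookFromProvider leaves ownership with the caller.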
+func WithEventOpts(f func(*logrus.Entry) []etw.EventOpt) HookOpt { + return func(h *Hook) error { + h.getEventsOpts = f + return nil + } +} diff --git a/vendor/github.com/Microsoft/go-winio/pkg/fs/fs_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/fs/fs_windows.go index 53bc797e79ac8..7a73aafb988ee 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/fs/fs_windows.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/fs/fs_windows.go @@ -27,5 +27,5 @@ func GetFileSystemType(path string) (fsType string, err error) { drive += `\` err = windows.GetVolumeInformation(windows.StringToUTF16Ptr(drive), nil, 0, nil, nil, nil, &buf[0], size) fsType = windows.UTF16ToString(buf) - return + return fsType, err } diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go index 2d9161e2deeea..48ce4e9243662 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go @@ -1,5 +1,3 @@ -// +build windows - // Package guid provides a GUID type. The backing structure for a GUID is // identical to that used by the golang.org/x/sys/windows GUID type. // There are two main binary encodings used for a GUID, the big-endian encoding, @@ -9,24 +7,26 @@ package guid import ( "crypto/rand" - "crypto/sha1" + "crypto/sha1" //nolint:gosec // not used for secure application "encoding" "encoding/binary" "fmt" "strconv" ) +//go:generate go run golang.org/x/tools/cmd/stringer -type=Variant -trimprefix=Variant -linecomment + // Variant specifies which GUID variant (or "type") of the GUID. It determines // how the entirety of the rest of the GUID is interpreted. type Variant uint8 -// The variants specified by RFC 4122. +// The variants specified by RFC 4122 section 4.1.1. const ( // VariantUnknown specifies a GUID variant which does not conform to one of // the variant encodings specified in RFC 4122. VariantUnknown Variant = iota VariantNCS - VariantRFC4122 + VariantRFC4122 // RFC 4122 VariantMicrosoft VariantFuture ) @@ -36,6 +36,10 @@ const ( // hash of an input string. type Version uint8 +func (v Version) String() string { + return strconv.FormatUint(uint64(v), 10) +} + var _ = (encoding.TextMarshaler)(GUID{}) var _ = (encoding.TextUnmarshaler)(&GUID{}) @@ -61,7 +65,7 @@ func NewV4() (GUID, error) { // big-endian UTF16 stream of bytes. If that is desired, the string can be // encoded as such before being passed to this function. 
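As a quick illustration of the new stringer output together with NewV5 (FromString and the Variant/Version accessors are assumed to be available from this package, as in released go-winio versions):

package main

import (
	"fmt"

	"github.com/Microsoft/go-winio/pkg/guid"
)

func main() {
	// RFC 4122 appendix C DNS namespace.
	ns, err := guid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
	if err != nil {
		panic(err)
	}
	g, err := guid.NewV5(ns, []byte("example.com"))
	if err != nil {
		panic(err)
	}
	// With the generated Variant stringer and the new Version.String,
	// this should print something like: <guid> RFC 4122 5
	fmt.Println(g, g.Variant(), g.Version())
}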
func NewV5(namespace GUID, name []byte) (GUID, error) { - b := sha1.New() + b := sha1.New() //nolint:gosec // not used for secure application namespaceBytes := namespace.ToArray() b.Write(namespaceBytes[:]) b.Write(name) diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go index f64d828c0ba41..805bd35484241 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_nonwindows.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package guid diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go index 83617f4eee9a1..27e45ee5ccf9e 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid_windows.go @@ -1,3 +1,6 @@ +//go:build windows +// +build windows + package guid import "golang.org/x/sys/windows" diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go new file mode 100644 index 0000000000000..4076d3132fdd8 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/variant_string.go @@ -0,0 +1,27 @@ +// Code generated by "stringer -type=Variant -trimprefix=Variant -linecomment"; DO NOT EDIT. + +package guid + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[VariantUnknown-0] + _ = x[VariantNCS-1] + _ = x[VariantRFC4122-2] + _ = x[VariantMicrosoft-3] + _ = x[VariantFuture-4] +} + +const _Variant_name = "UnknownNCSRFC 4122MicrosoftFuture" + +var _Variant_index = [...]uint8{0, 7, 10, 18, 27, 33} + +func (i Variant) String() string { + if i >= Variant(len(_Variant_index)-1) { + return "Variant(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Variant_name[_Variant_index[i]:_Variant_index[i+1]] +} diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go index c3dd7c2176971..0ff9dac906d3c 100644 --- a/vendor/github.com/Microsoft/go-winio/privilege.go +++ b/vendor/github.com/Microsoft/go-winio/privilege.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package winio @@ -24,22 +25,17 @@ import ( //sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW const ( - SE_PRIVILEGE_ENABLED = 2 + //revive:disable-next-line:var-naming ALL_CAPS + SE_PRIVILEGE_ENABLED = windows.SE_PRIVILEGE_ENABLED - ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300 + //revive:disable-next-line:var-naming ALL_CAPS + ERROR_NOT_ALL_ASSIGNED syscall.Errno = windows.ERROR_NOT_ALL_ASSIGNED SeBackupPrivilege = "SeBackupPrivilege" SeRestorePrivilege = "SeRestorePrivilege" SeSecurityPrivilege = "SeSecurityPrivilege" ) -const ( - securityAnonymous = iota - securityIdentification - securityImpersonation - securityDelegation -) - var ( privNames = make(map[string]uint64) privNameMutex sync.Mutex @@ -51,11 +47,9 @@ type PrivilegeError struct { } func (e *PrivilegeError) Error() string { - s := "" + s := "Could not enable privilege " if len(e.privileges) > 1 { s = "Could not enable privileges " - } else { - s = "Could not enable privilege " } for i, p := range e.privileges { if i != 0 { @@ 
-94,7 +88,7 @@ func RunWithPrivileges(names []string, fn func() error) error { } func mapPrivileges(names []string) ([]uint64, error) { - var privileges []uint64 + privileges := make([]uint64, 0, len(names)) privNameMutex.Lock() defer privNameMutex.Unlock() for _, name := range names { @@ -127,7 +121,7 @@ func enableDisableProcessPrivilege(names []string, action uint32) error { return err } - p, _ := windows.GetCurrentProcess() + p := windows.CurrentProcess() var token windows.Token err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token) if err != nil { @@ -140,10 +134,10 @@ func enableDisableProcessPrivilege(names []string, action uint32) error { func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error { var b bytes.Buffer - binary.Write(&b, binary.LittleEndian, uint32(len(privileges))) + _ = binary.Write(&b, binary.LittleEndian, uint32(len(privileges))) for _, p := range privileges { - binary.Write(&b, binary.LittleEndian, p) - binary.Write(&b, binary.LittleEndian, action) + _ = binary.Write(&b, binary.LittleEndian, p) + _ = binary.Write(&b, binary.LittleEndian, action) } prevState := make([]byte, b.Len()) reqSize := uint32(0) @@ -151,7 +145,7 @@ func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) e if !success { return err } - if err == ERROR_NOT_ALL_ASSIGNED { + if err == ERROR_NOT_ALL_ASSIGNED { //nolint:errorlint // err is Errno return &PrivilegeError{privileges} } return nil @@ -177,7 +171,7 @@ func getPrivilegeName(luid uint64) string { } func newThreadToken() (windows.Token, error) { - err := impersonateSelf(securityImpersonation) + err := impersonateSelf(windows.SecurityImpersonation) if err != nil { return 0, err } diff --git a/vendor/github.com/Microsoft/go-winio/reparse.go b/vendor/github.com/Microsoft/go-winio/reparse.go index fc1ee4d3a3e91..67d1a104a63f6 100644 --- a/vendor/github.com/Microsoft/go-winio/reparse.go +++ b/vendor/github.com/Microsoft/go-winio/reparse.go @@ -1,3 +1,6 @@ +//go:build windows +// +build windows + package winio import ( @@ -113,16 +116,16 @@ func EncodeReparsePoint(rp *ReparsePoint) []byte { } var b bytes.Buffer - binary.Write(&b, binary.LittleEndian, &data) + _ = binary.Write(&b, binary.LittleEndian, &data) if !rp.IsMountPoint { flags := uint32(0) if relative { flags |= 1 } - binary.Write(&b, binary.LittleEndian, flags) + _ = binary.Write(&b, binary.LittleEndian, flags) } - binary.Write(&b, binary.LittleEndian, ntTarget16) - binary.Write(&b, binary.LittleEndian, target16) + _ = binary.Write(&b, binary.LittleEndian, ntTarget16) + _ = binary.Write(&b, binary.LittleEndian, target16) return b.Bytes() } diff --git a/vendor/github.com/Microsoft/go-winio/sd.go b/vendor/github.com/Microsoft/go-winio/sd.go index db1b370a1b57d..5550ef6b61ef2 100644 --- a/vendor/github.com/Microsoft/go-winio/sd.go +++ b/vendor/github.com/Microsoft/go-winio/sd.go @@ -1,23 +1,25 @@ +//go:build windows // +build windows package winio import ( + "errors" "syscall" "unsafe" + + "golang.org/x/sys/windows" ) //sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW +//sys lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountSidW //sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW +//sys 
convertStringSidToSid(str *uint16, sid **byte) (err error) = advapi32.ConvertStringSidToSidW //sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW //sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW //sys localFree(mem uintptr) = LocalFree //sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength -const ( - cERROR_NONE_MAPPED = syscall.Errno(1332) -) - type AccountLookupError struct { Name string Err error @@ -28,8 +30,10 @@ func (e *AccountLookupError) Error() string { return "lookup account: empty account name specified" } var s string - switch e.Err { - case cERROR_NONE_MAPPED: + switch { + case errors.Is(e.Err, windows.ERROR_INVALID_SID): + s = "the security ID structure is invalid" + case errors.Is(e.Err, windows.ERROR_NONE_MAPPED): s = "not found" default: s = e.Err.Error() @@ -37,6 +41,8 @@ func (e *AccountLookupError) Error() string { return "lookup account " + e.Name + ": " + s } +func (e *AccountLookupError) Unwrap() error { return e.Err } + type SddlConversionError struct { Sddl string Err error @@ -46,15 +52,19 @@ func (e *SddlConversionError) Error() string { return "convert " + e.Sddl + ": " + e.Err.Error() } +func (e *SddlConversionError) Unwrap() error { return e.Err } + // LookupSidByName looks up the SID of an account by name +// +//revive:disable-next-line:var-naming SID, not Sid func LookupSidByName(name string) (sid string, err error) { if name == "" { - return "", &AccountLookupError{name, cERROR_NONE_MAPPED} + return "", &AccountLookupError{name, windows.ERROR_NONE_MAPPED} } var sidSize, sidNameUse, refDomainSize uint32 err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse) - if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { + if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno return "", &AccountLookupError{name, err} } sidBuffer := make([]byte, sidSize) @@ -73,6 +83,42 @@ func LookupSidByName(name string) (sid string, err error) { return sid, nil } +// LookupNameBySid looks up the name of an account by SID +// +//revive:disable-next-line:var-naming SID, not Sid +func LookupNameBySid(sid string) (name string, err error) { + if sid == "" { + return "", &AccountLookupError{sid, windows.ERROR_NONE_MAPPED} + } + + sidBuffer, err := windows.UTF16PtrFromString(sid) + if err != nil { + return "", &AccountLookupError{sid, err} + } + + var sidPtr *byte + if err = convertStringSidToSid(sidBuffer, &sidPtr); err != nil { + return "", &AccountLookupError{sid, err} + } + defer localFree(uintptr(unsafe.Pointer(sidPtr))) + + var nameSize, refDomainSize, sidNameUse uint32 + err = lookupAccountSid(nil, sidPtr, nil, &nameSize, nil, &refDomainSize, &sidNameUse) + if err != nil && err != windows.ERROR_INSUFFICIENT_BUFFER { //nolint:errorlint // err is Errno + return "", &AccountLookupError{sid, err} + } + + nameBuffer := make([]uint16, nameSize) + refDomainBuffer := make([]uint16, refDomainSize) + err = lookupAccountSid(nil, sidPtr, &nameBuffer[0], &nameSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) + if err != nil { + return "", &AccountLookupError{sid, err} + } + + name = windows.UTF16ToString(nameBuffer) + return name, nil +} + func SddlToSecurityDescriptor(sddl 
string) ([]byte, error) { var sdBuffer uintptr err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil) @@ -87,7 +133,7 @@ func SddlToSecurityDescriptor(sddl string) ([]byte, error) { func SecurityDescriptorToSddl(sd []byte) (string, error) { var sddl *uint16 - // The returned string length seems to including an aribtrary number of terminating NULs. + // The returned string length seems to include an arbitrary number of terminating NULs. // Don't use it. err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil) if err != nil { diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go index 5955c99fdeafe..a6ca111b39c4b 100644 --- a/vendor/github.com/Microsoft/go-winio/syscall.go +++ b/vendor/github.com/Microsoft/go-winio/syscall.go @@ -1,3 +1,5 @@ +//go:build windows + package winio -//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go ./*.go diff --git a/vendor/github.com/Microsoft/go-winio/tools.go b/vendor/github.com/Microsoft/go-winio/tools.go new file mode 100644 index 0000000000000..2aa045843ea81 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/tools.go @@ -0,0 +1,5 @@ +//go:build tools + +package winio + +import _ "golang.org/x/tools/cmd/stringer" diff --git a/vendor/github.com/Microsoft/go-winio/tools/mkwinsyscall/doc.go b/vendor/github.com/Microsoft/go-winio/tools/mkwinsyscall/doc.go new file mode 100644 index 0000000000000..20b172ccf6fec --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/tools/mkwinsyscall/doc.go @@ -0,0 +1,57 @@ +/* +mkwinsyscall generates windows system call bodies + +It parses all files specified on command line containing function +prototypes (like syscall_windows.go) and prints system call bodies +to standard output. + +The prototypes are marked by lines beginning with "//sys" and read +like func declarations if //sys is replaced by func, but: + + - The parameter lists must give a name for each argument. This + includes return parameters. + + - The parameter lists must give a type for each argument: + the (x, y, z int) shorthand is not allowed. + + - If the return parameter is an error number, it must be named err. + + - If go func name needs to be different from its winapi dll name, + the winapi name could be specified at the end, after "=" sign, like + + //sys LoadLibrary(libname string) (handle uint32, err error) = LoadLibraryA + + - Each function that returns err needs to supply a condition, that + return value of winapi will be tested against to detect failure. + This would set err to windows "last-error", otherwise it will be nil. + The value can be provided at end of //sys declaration, like + + //sys LoadLibrary(libname string) (handle uint32, err error) [failretval==-1] = LoadLibraryA + + and is [failretval==0] by default. + + - If the function name ends in a "?", then the function not existing is non- + fatal, and an error will be returned instead of panicking. + +Usage: + + mkwinsyscall [flags] [path ...] + +Flags + + -output string + Output file name (standard output if omitted). + -sort + Sort DLL and function declarations (default true). + Intended to help transition from older versions of mkwinsyscall by making diffs + easier to read and understand. 
+ -systemdll + Whether all DLLs should be loaded from the Windows system directory (default true). + -trace + Generate print statement after every syscall. + -utf16 + Encode string arguments as UTF-16 for syscalls not ending in 'A' or 'W' (default true). + -winio + Import this package ("github.com/Microsoft/go-winio"). +*/ +package main diff --git a/vendor/github.com/Microsoft/go-winio/tools/mkwinsyscall/mkwinsyscall.go b/vendor/github.com/Microsoft/go-winio/tools/mkwinsyscall/mkwinsyscall.go new file mode 100644 index 0000000000000..e72be31384c65 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/tools/mkwinsyscall/mkwinsyscall.go @@ -0,0 +1,1014 @@ +//go:build windows + +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "bufio" + "bytes" + "errors" + "flag" + "fmt" + "go/format" + "go/parser" + "go/token" + "io" + "log" + "os" + "path/filepath" + "runtime" + "sort" + "strconv" + "strings" + "text/template" + + "golang.org/x/sys/windows" +) + +const ( + pkgSyscall = "syscall" + pkgWindows = "windows" + + // common types. + + tBool = "bool" + tBoolPtr = "*bool" + tError = "error" + tString = "string" + + // error variable names. + + varErr = "err" + varErrNTStatus = "ntStatus" + varErrHR = "hr" +) + +var ( + filename = flag.String("output", "", "output file name (standard output if omitted)") + printTraceFlag = flag.Bool("trace", false, "generate print statement after every syscall") + systemDLL = flag.Bool("systemdll", true, "whether all DLLs should be loaded from the Windows system directory") + winio = flag.Bool("winio", false, `import this package ("github.com/Microsoft/go-winio")`) + utf16 = flag.Bool("utf16", true, "encode string arguments as UTF-16 for syscalls not ending in 'A' or 'W'") + sortdecls = flag.Bool("sort", true, "sort DLL and function declarations") +) + +func trim(s string) string { + return strings.Trim(s, " \t") +} + +func endsIn(s string, c byte) bool { + return len(s) >= 1 && s[len(s)-1] == c +} + +var packageName string + +func packagename() string { + return packageName +} + +func windowsdot() string { + if packageName == pkgWindows { + return "" + } + return pkgWindows + "." +} + +func syscalldot() string { + if packageName == pkgSyscall { + return "" + } + return pkgSyscall + "." +} + +// Param is function parameter. +type Param struct { + Name string + Type string + fn *Fn + tmpVarIdx int +} + +// tmpVar returns temp variable name that will be used to represent p during syscall. +func (p *Param) tmpVar() string { + if p.tmpVarIdx < 0 { + p.tmpVarIdx = p.fn.curTmpVarIdx + p.fn.curTmpVarIdx++ + } + return fmt.Sprintf("_p%d", p.tmpVarIdx) +} + +// BoolTmpVarCode returns source code for bool temp variable. +func (p *Param) BoolTmpVarCode() string { + const code = `var %[1]s uint32 + if %[2]s { + %[1]s = 1 + }` + return fmt.Sprintf(code, p.tmpVar(), p.Name) +} + +// BoolPointerTmpVarCode returns source code for bool temp variable. +func (p *Param) BoolPointerTmpVarCode() string { + const code = `var %[1]s uint32 + if *%[2]s { + %[1]s = 1 + }` + return fmt.Sprintf(code, p.tmpVar(), p.Name) +} + +// SliceTmpVarCode returns source code for slice temp variable. +func (p *Param) SliceTmpVarCode() string { + const code = `var %s *%s + if len(%s) > 0 { + %s = &%s[0] + }` + tmp := p.tmpVar() + return fmt.Sprintf(code, tmp, p.Type[2:], p.Name, tmp, p.Name) +} + +// StringTmpVarCode returns source code for string temp variable. 
+func (p *Param) StringTmpVarCode() string { + errvar := p.fn.Rets.ErrorVarName() + if errvar == "" { + errvar = "_" + } + tmp := p.tmpVar() + const code = `var %s %s + %s, %s = %s(%s)` + s := fmt.Sprintf(code, tmp, p.fn.StrconvType(), tmp, errvar, p.fn.StrconvFunc(), p.Name) + if errvar == "-" { + return s + } + const morecode = ` + if %s != nil { + return + }` + return s + fmt.Sprintf(morecode, errvar) +} + +// TmpVarCode returns source code for temp variable. +func (p *Param) TmpVarCode() string { + switch { + case p.Type == tBool: + return p.BoolTmpVarCode() + case p.Type == tBoolPtr: + return p.BoolPointerTmpVarCode() + case strings.HasPrefix(p.Type, "[]"): + return p.SliceTmpVarCode() + default: + return "" + } +} + +// TmpVarReadbackCode returns source code for reading back the temp variable into the original variable. +func (p *Param) TmpVarReadbackCode() string { + switch { + case p.Type == tBoolPtr: + return fmt.Sprintf("*%s = %s != 0", p.Name, p.tmpVar()) + default: + return "" + } +} + +// TmpVarHelperCode returns source code for helper's temp variable. +func (p *Param) TmpVarHelperCode() string { + if p.Type != "string" { + return "" + } + return p.StringTmpVarCode() +} + +// SyscallArgList returns source code fragments representing p parameter +// in syscall. Slices are translated into 2 syscall parameters: pointer to +// the first element and length. +func (p *Param) SyscallArgList() []string { + t := p.HelperType() + var s string + switch { + case t == tBoolPtr: + s = fmt.Sprintf("unsafe.Pointer(&%s)", p.tmpVar()) + case t[0] == '*': + s = fmt.Sprintf("unsafe.Pointer(%s)", p.Name) + case t == tBool: + s = p.tmpVar() + case strings.HasPrefix(t, "[]"): + return []string{ + fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.tmpVar()), + fmt.Sprintf("uintptr(len(%s))", p.Name), + } + default: + s = p.Name + } + return []string{fmt.Sprintf("uintptr(%s)", s)} +} + +// IsError determines if p parameter is used to return error. +func (p *Param) IsError() bool { + return p.Name == varErr && p.Type == tError +} + +// HelperType returns type of parameter p used in helper function. +func (p *Param) HelperType() string { + if p.Type == tString { + return p.fn.StrconvType() + } + return p.Type +} + +// join concatenates parameters ps into a string with sep separator. +// Each parameter is converted into string by applying fn to it +// before conversion. +func join(ps []*Param, fn func(*Param) string, sep string) string { + if len(ps) == 0 { + return "" + } + a := make([]string, 0) + for _, p := range ps { + a = append(a, fn(p)) + } + return strings.Join(a, sep) +} + +// Rets describes function return parameters. +type Rets struct { + Name string + Type string + ReturnsError bool + FailCond string + fnMaybeAbsent bool +} + +// ErrorVarName returns error variable name for r. +func (r *Rets) ErrorVarName() string { + if r.ReturnsError { + return varErr + } + if r.Type == tError { + return r.Name + } + return "" +} + +// ToParams converts r into slice of *Param. +func (r *Rets) ToParams() []*Param { + ps := make([]*Param, 0) + if len(r.Name) > 0 { + ps = append(ps, &Param{Name: r.Name, Type: r.Type}) + } + if r.ReturnsError { + ps = append(ps, &Param{Name: varErr, Type: tError}) + } + return ps +} + +// List returns source code of syscall return parameters. 
+func (r *Rets) List() string { + s := join(r.ToParams(), func(p *Param) string { return p.Name + " " + p.Type }, ", ") + if len(s) > 0 { + s = "(" + s + ")" + } else if r.fnMaybeAbsent { + s = "(err error)" + } + return s +} + +// PrintList returns source code of trace printing part correspondent +// to syscall return values. +func (r *Rets) PrintList() string { + return join(r.ToParams(), func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `) +} + +// SetReturnValuesCode returns source code that accepts syscall return values. +func (r *Rets) SetReturnValuesCode() string { + if r.Name == "" && !r.ReturnsError { + return "" + } + retvar := "r0" + if r.Name == "" { + retvar = "r1" + } + errvar := "_" + if r.ReturnsError { + errvar = "e1" + } + return fmt.Sprintf("%s, _, %s := ", retvar, errvar) +} + +func (r *Rets) useLongHandleErrorCode(retvar string) string { + const code = `if %s { + err = errnoErr(e1) + }` + cond := retvar + " == 0" + if r.FailCond != "" { + cond = strings.Replace(r.FailCond, "failretval", retvar, 1) + } + return fmt.Sprintf(code, cond) +} + +// SetErrorCode returns source code that sets return parameters. +func (r *Rets) SetErrorCode() string { + const code = `if r0 != 0 { + %s = %sErrno(r0) + }` + const ntStatus = `if r0 != 0 { + %s = %sNTStatus(r0) + }` + const hrCode = `if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + %s = %sErrno(r0) + }` + + if r.Name == "" && !r.ReturnsError { + return "" + } + if r.Name == "" { + return r.useLongHandleErrorCode("r1") + } + if r.Type == tError { + switch r.Name { + case varErrNTStatus, strings.ToLower(varErrNTStatus): // allow ntstatus to work + return fmt.Sprintf(ntStatus, r.Name, windowsdot()) + case varErrHR: + return fmt.Sprintf(hrCode, r.Name, syscalldot()) + default: + return fmt.Sprintf(code, r.Name, syscalldot()) + } + } + + var s string + switch { + case r.Type[0] == '*': + s = fmt.Sprintf("%s = (%s)(unsafe.Pointer(r0))", r.Name, r.Type) + case r.Type == tBool: + s = fmt.Sprintf("%s = r0 != 0", r.Name) + default: + s = fmt.Sprintf("%s = %s(r0)", r.Name, r.Type) + } + if !r.ReturnsError { + return s + } + return s + "\n\t" + r.useLongHandleErrorCode(r.Name) +} + +// Fn describes syscall function. +type Fn struct { + Name string + Params []*Param + Rets *Rets + PrintTrace bool + dllname string + dllfuncname string + src string + // TODO: get rid of this field and just use parameter index instead + curTmpVarIdx int // insure tmp variables have uniq names +} + +// extractParams parses s to extract function parameters. +func extractParams(s string, f *Fn) ([]*Param, error) { + s = trim(s) + if s == "" { + return nil, nil + } + a := strings.Split(s, ",") + ps := make([]*Param, len(a)) + for i := range ps { + s2 := trim(a[i]) + b := strings.Split(s2, " ") + if len(b) != 2 { + b = strings.Split(s2, "\t") + if len(b) != 2 { + return nil, errors.New("Could not extract function parameter from \"" + s2 + "\"") + } + } + ps[i] = &Param{ + Name: trim(b[0]), + Type: trim(b[1]), + fn: f, + tmpVarIdx: -1, + } + } + return ps, nil +} + +// extractSection extracts text out of string s starting after start +// and ending just before end. found return value will indicate success, +// and prefix, body and suffix will contain correspondent parts of string s. 
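The prefix/body/suffix split that extractSection performs can be pictured with a stripped-down stand-in. This is an illustration of the idea only, not the tool's code; it skips the trimming and the empty-prefix special case.

package main

import (
	"fmt"
	"strings"
)

// splitSection returns everything before start, everything between start and
// end, and the remainder after end.
func splitSection(s string, start, end rune) (prefix, body, suffix string, found bool) {
	prefix, rest, ok := strings.Cut(s, string(start))
	if !ok {
		return "", "", s, false
	}
	body, suffix, ok = strings.Cut(rest, string(end))
	if !ok {
		return "", "", "", false
	}
	return prefix, body, suffix, true
}

func main() {
	p, b, s, _ := splitSection("LoadLibrary(libname string) (handle uint32, err error) = LoadLibraryA", '(', ')')
	fmt.Printf("prefix=%q\nbody=%q\nsuffix=%q\n", p, b, s)
	// prefix="LoadLibrary"
	// body="libname string"
	// suffix=" (handle uint32, err error) = LoadLibraryA"
}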
+func extractSection(s string, start, end rune) (prefix, body, suffix string, found bool) { + s = trim(s) + if strings.HasPrefix(s, string(start)) { + // no prefix + body = s[1:] + } else { + a := strings.SplitN(s, string(start), 2) + if len(a) != 2 { + return "", "", s, false + } + prefix = a[0] + body = a[1] + } + a := strings.SplitN(body, string(end), 2) + if len(a) != 2 { + return "", "", "", false + } + return prefix, a[0], a[1], true +} + +// newFn parses string s and return created function Fn. +func newFn(s string) (*Fn, error) { + s = trim(s) + f := &Fn{ + Rets: &Rets{}, + src: s, + PrintTrace: *printTraceFlag, + } + // function name and args + prefix, body, s, found := extractSection(s, '(', ')') + if !found || prefix == "" { + return nil, errors.New("Could not extract function name and parameters from \"" + f.src + "\"") + } + f.Name = prefix + var err error + f.Params, err = extractParams(body, f) + if err != nil { + return nil, err + } + // return values + _, body, s, found = extractSection(s, '(', ')') + if found { + r, err := extractParams(body, f) + if err != nil { + return nil, err + } + switch len(r) { + case 0: + case 1: + if r[0].IsError() { + f.Rets.ReturnsError = true + } else { + f.Rets.Name = r[0].Name + f.Rets.Type = r[0].Type + } + case 2: + if !r[1].IsError() { + return nil, errors.New("Only last windows error is allowed as second return value in \"" + f.src + "\"") + } + f.Rets.ReturnsError = true + f.Rets.Name = r[0].Name + f.Rets.Type = r[0].Type + default: + return nil, errors.New("Too many return values in \"" + f.src + "\"") + } + } + // fail condition + _, body, s, found = extractSection(s, '[', ']') + if found { + f.Rets.FailCond = body + } + // dll and dll function names + s = trim(s) + if s == "" { + return f, nil + } + if !strings.HasPrefix(s, "=") { + return nil, errors.New("Could not extract dll name from \"" + f.src + "\"") + } + s = trim(s[1:]) + a := strings.Split(s, ".") + switch len(a) { + case 1: + f.dllfuncname = a[0] + case 2: + f.dllname = a[0] + f.dllfuncname = a[1] + default: + return nil, errors.New("Could not extract dll name from \"" + f.src + "\"") + } + if n := f.dllfuncname; endsIn(n, '?') { + f.dllfuncname = n[:len(n)-1] + f.Rets.fnMaybeAbsent = true + } + return f, nil +} + +// DLLName returns DLL name for function f. +func (f *Fn) DLLName() string { + if f.dllname == "" { + return "kernel32" + } + return f.dllname +} + +// DLLName returns DLL function name for function f. +func (f *Fn) DLLFuncName() string { + if f.dllfuncname == "" { + return f.Name + } + return f.dllfuncname +} + +// ParamList returns source code for function f parameters. +func (f *Fn) ParamList() string { + return join(f.Params, func(p *Param) string { return p.Name + " " + p.Type }, ", ") +} + +// HelperParamList returns source code for helper function f parameters. +func (f *Fn) HelperParamList() string { + return join(f.Params, func(p *Param) string { return p.Name + " " + p.HelperType() }, ", ") +} + +// ParamPrintList returns source code of trace printing part correspondent +// to syscall input parameters. +func (f *Fn) ParamPrintList() string { + return join(f.Params, func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `) +} + +// ParamCount return number of syscall parameters for function f. +func (f *Fn) ParamCount() int { + n := 0 + for _, p := range f.Params { + n += len(p.SyscallArgList()) + } + return n +} + +// SyscallParamCount determines which version of Syscall/Syscall6/Syscall9/... +// to use. 
It returns parameter count for correspondent SyscallX function. +func (f *Fn) SyscallParamCount() int { + n := f.ParamCount() + switch { + case n <= 3: + return 3 + case n <= 6: + return 6 + case n <= 9: + return 9 + case n <= 12: + return 12 + case n <= 15: + return 15 + default: + panic("too many arguments to system call") + } +} + +// Syscall determines which SyscallX function to use for function f. +func (f *Fn) Syscall() string { + c := f.SyscallParamCount() + if c == 3 { + return syscalldot() + "Syscall" + } + return syscalldot() + "Syscall" + strconv.Itoa(c) +} + +// SyscallParamList returns source code for SyscallX parameters for function f. +func (f *Fn) SyscallParamList() string { + a := make([]string, 0) + for _, p := range f.Params { + a = append(a, p.SyscallArgList()...) + } + for len(a) < f.SyscallParamCount() { + a = append(a, "0") + } + return strings.Join(a, ", ") +} + +// HelperCallParamList returns source code of call into function f helper. +func (f *Fn) HelperCallParamList() string { + a := make([]string, 0, len(f.Params)) + for _, p := range f.Params { + s := p.Name + if p.Type == tString { + s = p.tmpVar() + } + a = append(a, s) + } + return strings.Join(a, ", ") +} + +// MaybeAbsent returns source code for handling functions that are possibly unavailable. +func (f *Fn) MaybeAbsent() string { + if !f.Rets.fnMaybeAbsent { + return "" + } + const code = `%[1]s = proc%[2]s.Find() + if %[1]s != nil { + return + }` + errorVar := f.Rets.ErrorVarName() + if errorVar == "" { + errorVar = varErr + } + return fmt.Sprintf(code, errorVar, f.DLLFuncName()) +} + +// IsUTF16 is true, if f is W (UTF-16) function and false for all A (ASCII) functions. +// Functions ending in neither will default to UTF-16, unless the `-utf16` flag is set +// to `false`. +func (f *Fn) IsUTF16() bool { + s := f.DLLFuncName() + return endsIn(s, 'W') || (*utf16 && !endsIn(s, 'A')) +} + +// StrconvFunc returns name of Go string to OS string function for f. +func (f *Fn) StrconvFunc() string { + if f.IsUTF16() { + return syscalldot() + "UTF16PtrFromString" + } + return syscalldot() + "BytePtrFromString" +} + +// StrconvType returns Go type name used for OS string for f. +func (f *Fn) StrconvType() string { + if f.IsUTF16() { + return "*uint16" + } + return "*byte" +} + +// HasStringParam is true, if f has at least one string parameter. +// Otherwise it is false. +func (f *Fn) HasStringParam() bool { + for _, p := range f.Params { + if p.Type == tString { + return true + } + } + return false +} + +// HelperName returns name of function f helper. +func (f *Fn) HelperName() string { + if !f.HasStringParam() { + return f.Name + } + return "_" + f.Name +} + +// Source files and functions. +type Source struct { + Funcs []*Fn + DLLFuncNames []*Fn + Files []string + StdLibImports []string + ExternalImports []string +} + +func (src *Source) Import(pkg string) { + src.StdLibImports = append(src.StdLibImports, pkg) + sort.Strings(src.StdLibImports) +} + +func (src *Source) ExternalImport(pkg string) { + src.ExternalImports = append(src.ExternalImports, pkg) + sort.Strings(src.ExternalImports) +} + +// ParseFiles parses files listed in fs and extracts all syscall +// functions listed in sys comments. It returns source files +// and functions collection *Source if successful. 
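For reference, the kind of annotated file these functions consume looks roughly like this. It is a hypothetical example following the conventions documented in doc.go above: GetTickCount and LoadLibraryW are real kernel32 exports, while somedll.OptionalApi is made up purely to show the trailing "?" form.

// Hypothetical input file for mkwinsyscall.
package example

//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go syscall.go

//sys getTickCount() (count uint32) = kernel32.GetTickCount
//sys loadLibrary(libname string) (handle uintptr, err error) [failretval==0] = kernel32.LoadLibraryW
//sys maybeMissing(flags uint32) (err error) = somedll.OptionalApi?

Running go generate on such a file produces a zsyscall_windows.go with one wrapper per prototype, roughly as sketched after the Generate method below.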
+func ParseFiles(fs []string) (*Source, error) { + src := &Source{ + Funcs: make([]*Fn, 0), + Files: make([]string, 0), + StdLibImports: []string{ + "unsafe", + }, + ExternalImports: make([]string, 0), + } + for _, file := range fs { + if err := src.ParseFile(file); err != nil { + return nil, err + } + } + src.DLLFuncNames = make([]*Fn, 0, len(src.Funcs)) + uniq := make(map[string]bool, len(src.Funcs)) + for _, fn := range src.Funcs { + name := fn.DLLFuncName() + if !uniq[name] { + src.DLLFuncNames = append(src.DLLFuncNames, fn) + uniq[name] = true + } + } + return src, nil +} + +// DLLs return dll names for a source set src. +func (src *Source) DLLs() []string { + uniq := make(map[string]bool) + r := make([]string, 0) + for _, f := range src.Funcs { + name := f.DLLName() + if _, found := uniq[name]; !found { + uniq[name] = true + r = append(r, name) + } + } + if *sortdecls { + sort.Strings(r) + } + return r +} + +// ParseFile adds additional file (or files, if path is a glob pattern) path to a source set src. +func (src *Source) ParseFile(path string) error { + file, err := os.Open(path) + if err == nil { + defer file.Close() + return src.parseFile(file) + } else if !(errors.Is(err, os.ErrNotExist) || errors.Is(err, windows.ERROR_INVALID_NAME)) { + return err + } + + paths, err := filepath.Glob(path) + if err != nil { + return err + } + + for _, path := range paths { + file, err := os.Open(path) + if err != nil { + return err + } + err = src.parseFile(file) + file.Close() + if err != nil { + return err + } + } + + return nil +} + +func (src *Source) parseFile(file *os.File) error { + s := bufio.NewScanner(file) + for s.Scan() { + t := trim(s.Text()) + if len(t) < 7 { + continue + } + if !strings.HasPrefix(t, "//sys") { + continue + } + t = t[5:] + if !(t[0] == ' ' || t[0] == '\t') { + continue + } + f, err := newFn(t[1:]) + if err != nil { + return err + } + src.Funcs = append(src.Funcs, f) + } + if err := s.Err(); err != nil { + return err + } + src.Files = append(src.Files, file.Name()) + if *sortdecls { + sort.Slice(src.Funcs, func(i, j int) bool { + fi, fj := src.Funcs[i], src.Funcs[j] + if fi.DLLName() == fj.DLLName() { + return fi.DLLFuncName() < fj.DLLFuncName() + } + return fi.DLLName() < fj.DLLName() + }) + } + + // get package name + fset := token.NewFileSet() + _, err := file.Seek(0, 0) + if err != nil { + return err + } + pkg, err := parser.ParseFile(fset, "", file, parser.PackageClauseOnly) + if err != nil { + return err + } + packageName = pkg.Name.Name + + return nil +} + +// IsStdRepo reports whether src is part of standard library. +func (src *Source) IsStdRepo() (bool, error) { + if len(src.Files) == 0 { + return false, errors.New("no input files provided") + } + abspath, err := filepath.Abs(src.Files[0]) + if err != nil { + return false, err + } + goroot := runtime.GOROOT() + if runtime.GOOS == "windows" { + abspath = strings.ToLower(abspath) + goroot = strings.ToLower(goroot) + } + sep := string(os.PathSeparator) + if !strings.HasSuffix(goroot, sep) { + goroot += sep + } + return strings.HasPrefix(abspath, goroot), nil +} + +// Generate output source file from a source set src. 
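For orientation, the output that Generate and srcTemplate (below) would produce for the loadLibrary prototype sketched earlier looks roughly like this hand-written approximation; the real file also carries the //go:build and "DO NOT EDIT" headers and routes errors through the errnoErr helper.

package example

import (
	"syscall"
	"unsafe"

	"golang.org/x/sys/windows"
)

var (
	modkernel32      = windows.NewLazySystemDLL("kernel32.dll")
	procLoadLibraryW = modkernel32.NewProc("LoadLibraryW")
)

// loadLibrary is the string-conversion helper wrapper.
func loadLibrary(libname string) (handle uintptr, err error) {
	var _p0 *uint16
	_p0, err = syscall.UTF16PtrFromString(libname)
	if err != nil {
		return
	}
	return _loadLibrary(_p0)
}

// _loadLibrary performs the actual syscall.
func _loadLibrary(libname *uint16) (handle uintptr, err error) {
	r0, _, e1 := syscall.Syscall(procLoadLibraryW.Addr(), 1, uintptr(unsafe.Pointer(libname)), 0, 0)
	handle = uintptr(r0)
	if handle == 0 {
		err = e1 // the generated code uses errnoErr(e1) to reuse common Errno values
	}
	return
}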
+func (src *Source) Generate(w io.Writer) error { + const ( + pkgStd = iota // any package in std library + pkgXSysWindows // x/sys/windows package + pkgOther + ) + isStdRepo, err := src.IsStdRepo() + if err != nil { + return err + } + var pkgtype int + switch { + case isStdRepo: + pkgtype = pkgStd + case packageName == "windows": + // TODO: this needs better logic than just using package name + pkgtype = pkgXSysWindows + default: + pkgtype = pkgOther + } + if *systemDLL { + switch pkgtype { + case pkgStd: + src.Import("internal/syscall/windows/sysdll") + case pkgXSysWindows: + default: + src.ExternalImport("golang.org/x/sys/windows") + } + } + if *winio { + src.ExternalImport("github.com/Microsoft/go-winio") + } + if packageName != "syscall" { + src.Import("syscall") + } + funcMap := template.FuncMap{ + "packagename": packagename, + "syscalldot": syscalldot, + "newlazydll": func(dll string) string { + arg := "\"" + dll + ".dll\"" + if !*systemDLL { + return syscalldot() + "NewLazyDLL(" + arg + ")" + } + if strings.HasPrefix(dll, "api_") || strings.HasPrefix(dll, "ext_") { + arg = strings.Replace(arg, "_", "-", -1) + } + switch pkgtype { + case pkgStd: + return syscalldot() + "NewLazyDLL(sysdll.Add(" + arg + "))" + case pkgXSysWindows: + return "NewLazySystemDLL(" + arg + ")" + default: + return "windows.NewLazySystemDLL(" + arg + ")" + } + }, + } + t := template.Must(template.New("main").Funcs(funcMap).Parse(srcTemplate)) + err = t.Execute(w, src) + if err != nil { + return errors.New("Failed to execute template: " + err.Error()) + } + return nil +} + +func usage() { + fmt.Fprintf(os.Stderr, "usage: mkwinsyscall [flags] [path ...]\n") + flag.PrintDefaults() + os.Exit(1) +} + +func main() { + flag.Usage = usage + flag.Parse() + if len(flag.Args()) <= 0 { + fmt.Fprintf(os.Stderr, "no files to parse provided\n") + usage() + } + + src, err := ParseFiles(flag.Args()) + if err != nil { + log.Fatal(err) + } + + var buf bytes.Buffer + if err := src.Generate(&buf); err != nil { + log.Fatal(err) + } + + data, err := format.Source(buf.Bytes()) + if err != nil { + log.Fatal(err) + } + if *filename == "" { + _, err = os.Stdout.Write(data) + } else { + //nolint:gosec // G306: code file, no need for wants 0600 + err = os.WriteFile(*filename, data, 0644) + } + if err != nil { + log.Fatal(err) + } +} + +// TODO: use println instead to print in the following template + +const srcTemplate = ` +{{define "main"}} //go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. + +package {{packagename}} + +import ( +{{range .StdLibImports}}"{{.}}" +{{end}} + +{{range .ExternalImports}}"{{.}}" +{{end}} +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = {{syscalldot}}Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = {{syscalldot}}EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e {{syscalldot}}Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( +{{template "dlls" .}} +{{template "funcnames" .}}) +{{range .Funcs}}{{if .HasStringParam}}{{template "helperbody" .}}{{end}}{{template "funcbody" .}}{{end}} +{{end}} + +{{/* help functions */}} + +{{define "dlls"}}{{range .DLLs}} mod{{.}} = {{newlazydll .}} +{{end}}{{end}} + +{{define "funcnames"}}{{range .DLLFuncNames}} proc{{.DLLFuncName}} = mod{{.DLLName}}.NewProc("{{.DLLFuncName}}") +{{end}}{{end}} + +{{define "helperbody"}} +func {{.Name}}({{.ParamList}}) {{template "results" .}}{ +{{template "helpertmpvars" .}} return {{.HelperName}}({{.HelperCallParamList}}) +} +{{end}} + +{{define "funcbody"}} +func {{.HelperName}}({{.HelperParamList}}) {{template "results" .}}{ +{{template "maybeabsent" .}} {{template "tmpvars" .}} {{template "syscall" .}} {{template "tmpvarsreadback" .}} +{{template "seterror" .}}{{template "printtrace" .}} return +} +{{end}} + +{{define "helpertmpvars"}}{{range .Params}}{{if .TmpVarHelperCode}} {{.TmpVarHelperCode}} +{{end}}{{end}}{{end}} + +{{define "maybeabsent"}}{{if .MaybeAbsent}}{{.MaybeAbsent}} +{{end}}{{end}} + +{{define "tmpvars"}}{{range .Params}}{{if .TmpVarCode}} {{.TmpVarCode}} +{{end}}{{end}}{{end}} + +{{define "results"}}{{if .Rets.List}}{{.Rets.List}} {{end}}{{end}} + +{{define "syscall"}}{{.Rets.SetReturnValuesCode}}{{.Syscall}}(proc{{.DLLFuncName}}.Addr(), {{.ParamCount}}, {{.SyscallParamList}}){{end}} + +{{define "tmpvarsreadback"}}{{range .Params}}{{if .TmpVarReadbackCode}} +{{.TmpVarReadbackCode}}{{end}}{{end}}{{end}} + +{{define "seterror"}}{{if .Rets.SetErrorCode}} {{.Rets.SetErrorCode}} +{{end}}{{end}} + +{{define "printtrace"}}{{if .PrintTrace}} print("SYSCALL: {{.Name}}(", {{.ParamPrintList}}") (", {{.Rets.PrintList}}")\n") +{{end}}{{end}} + +` diff --git a/vendor/github.com/Microsoft/go-winio/vhd/vhd.go b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go index f7f78fc230485..b54cad112703e 100644 --- a/vendor/github.com/Microsoft/go-winio/vhd/vhd.go +++ b/vendor/github.com/Microsoft/go-winio/vhd/vhd.go @@ -11,7 +11,7 @@ import ( "golang.org/x/sys/windows" ) -//go:generate go run mksyscall_windows.go -output zvhd_windows.go vhd.go +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zvhd_windows.go vhd.go //sys createVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, securityDescriptor *uintptr, createVirtualDiskFlags uint32, providerSpecificFlags uint32, parameters *CreateVirtualDiskParameters, overlapped *syscall.Overlapped, handle *syscall.Handle) (win32err error) = virtdisk.CreateVirtualDisk //sys openVirtualDisk(virtualStorageType *VirtualStorageType, path string, virtualDiskAccessMask uint32, openVirtualDiskFlags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (win32err error) = virtdisk.OpenVirtualDisk @@ -62,8 +62,8 @@ type OpenVirtualDiskParameters struct { Version2 OpenVersion2 } -// The higher level `OpenVersion2` struct uses bools to refer to `GetInfoOnly` and `ReadOnly` for ease of use. However, -// the internal windows structure uses `BOOLS` aka int32s for these types. `openVersion2` is used for translating +// The higher level `OpenVersion2` struct uses `bool`s to refer to `GetInfoOnly` and `ReadOnly` for ease of use. However, +// the internal windows structure uses `BOOL`s aka int32s for these types. `openVersion2` is used for translating // `OpenVersion2` fields to the correct windows internal field types on the `Open____` methods. 
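The translation the comment above describes amounts to widening Go bools into 4-byte BOOLs; a trivial sketch of the conversion:

package example

// boolToBOOL models the bool -> BOOL (int32) widening used when filling the
// internal openVersion2 structure from the exported OpenVersion2 fields.
func boolToBOOL(b bool) int32 {
	if b {
		return 1
	}
	return 0
}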
type openVersion2 struct { getInfoOnly int32 @@ -87,9 +87,10 @@ type AttachVirtualDiskParameters struct { } const ( + //revive:disable-next-line:var-naming ALL_CAPS VIRTUAL_STORAGE_TYPE_DEVICE_VHDX = 0x3 - // Access Mask for opening a VHD + // Access Mask for opening a VHD. VirtualDiskAccessNone VirtualDiskAccessMask = 0x00000000 VirtualDiskAccessAttachRO VirtualDiskAccessMask = 0x00010000 VirtualDiskAccessAttachRW VirtualDiskAccessMask = 0x00020000 @@ -101,7 +102,7 @@ const ( VirtualDiskAccessAll VirtualDiskAccessMask = 0x003f0000 VirtualDiskAccessWritable VirtualDiskAccessMask = 0x00320000 - // Flags for creating a VHD + // Flags for creating a VHD. CreateVirtualDiskFlagNone CreateVirtualDiskFlag = 0x0 CreateVirtualDiskFlagFullPhysicalAllocation CreateVirtualDiskFlag = 0x1 CreateVirtualDiskFlagPreventWritesToSourceDisk CreateVirtualDiskFlag = 0x2 @@ -109,12 +110,12 @@ const ( CreateVirtualDiskFlagCreateBackingStorage CreateVirtualDiskFlag = 0x8 CreateVirtualDiskFlagUseChangeTrackingSourceLimit CreateVirtualDiskFlag = 0x10 CreateVirtualDiskFlagPreserveParentChangeTrackingState CreateVirtualDiskFlag = 0x20 - CreateVirtualDiskFlagVhdSetUseOriginalBackingStorage CreateVirtualDiskFlag = 0x40 + CreateVirtualDiskFlagVhdSetUseOriginalBackingStorage CreateVirtualDiskFlag = 0x40 //revive:disable-line:var-naming VHD, not Vhd CreateVirtualDiskFlagSparseFile CreateVirtualDiskFlag = 0x80 - CreateVirtualDiskFlagPmemCompatible CreateVirtualDiskFlag = 0x100 + CreateVirtualDiskFlagPmemCompatible CreateVirtualDiskFlag = 0x100 //revive:disable-line:var-naming PMEM, not Pmem CreateVirtualDiskFlagSupportCompressedVolumes CreateVirtualDiskFlag = 0x200 - // Flags for opening a VHD + // Flags for opening a VHD. OpenVirtualDiskFlagNone VirtualDiskFlag = 0x00000000 OpenVirtualDiskFlagNoParents VirtualDiskFlag = 0x00000001 OpenVirtualDiskFlagBlankFile VirtualDiskFlag = 0x00000002 @@ -127,7 +128,7 @@ const ( OpenVirtualDiskFlagNoWriteHardening VirtualDiskFlag = 0x00000100 OpenVirtualDiskFlagSupportCompressedVolumes VirtualDiskFlag = 0x00000200 - // Flags for attaching a VHD + // Flags for attaching a VHD. AttachVirtualDiskFlagNone AttachVirtualDiskFlag = 0x00000000 AttachVirtualDiskFlagReadOnly AttachVirtualDiskFlag = 0x00000001 AttachVirtualDiskFlagNoDriveLetter AttachVirtualDiskFlag = 0x00000002 @@ -140,12 +141,14 @@ const ( AttachVirtualDiskFlagSinglePartition AttachVirtualDiskFlag = 0x00000100 AttachVirtualDiskFlagRegisterVolume AttachVirtualDiskFlag = 0x00000200 - // Flags for detaching a VHD + // Flags for detaching a VHD. DetachVirtualDiskFlagNone DetachVirtualDiskFlag = 0x0 ) // CreateVhdx is a helper function to create a simple vhdx file at the given path using // default values. +// +//revive:disable-next-line:var-naming VHDX, not Vhdx func CreateVhdx(path string, maxSizeInGb, blockSizeInMb uint32) error { params := CreateVirtualDiskParameters{ Version: 2, @@ -172,6 +175,8 @@ func DetachVirtualDisk(handle syscall.Handle) (err error) { } // DetachVhd detaches a vhd found at `path`. +// +//revive:disable-next-line:var-naming VHD, not Vhd func DetachVhd(path string) error { handle, err := OpenVirtualDisk( path, @@ -181,12 +186,16 @@ func DetachVhd(path string) error { if err != nil { return err } - defer syscall.CloseHandle(handle) + defer syscall.CloseHandle(handle) //nolint:errcheck return DetachVirtualDisk(handle) } // AttachVirtualDisk attaches a virtual hard disk for use. 
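Pulled together, the exported VHD helpers compose like this. This is a sketch only: the path is illustrative, attaching generally requires elevation, and the flag and parameter choices mirror the package's own AttachVhd helper.

package main

import (
	"fmt"
	"syscall"

	"github.com/Microsoft/go-winio/vhd"
)

func main() {
	const path = `C:\temp\example.vhdx` // illustrative path

	// 1 GiB dynamic VHDX with a 1 MiB block size.
	if err := vhd.CreateVhdx(path, 1, 1); err != nil {
		panic(err)
	}

	handle, err := vhd.OpenVirtualDisk(path, vhd.VirtualDiskAccessNone, vhd.OpenVirtualDiskFlagNone)
	if err != nil {
		panic(err)
	}
	defer syscall.CloseHandle(handle) //nolint:errcheck

	params := vhd.AttachVirtualDiskParameters{Version: 2}
	if err := vhd.AttachVirtualDisk(handle, vhd.AttachVirtualDiskFlagNone, &params); err != nil {
		panic(err)
	}
	defer vhd.DetachVirtualDisk(handle) //nolint:errcheck

	physicalPath, err := vhd.GetVirtualDiskPhysicalPath(handle)
	if err != nil {
		panic(err)
	}
	fmt.Println("attached at", physicalPath)
}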
-func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtualDiskFlag, parameters *AttachVirtualDiskParameters) (err error) { +func AttachVirtualDisk( + handle syscall.Handle, + attachVirtualDiskFlag AttachVirtualDiskFlag, + parameters *AttachVirtualDiskParameters, +) (err error) { // Supports both version 1 and 2 of the attach parameters as version 2 wasn't present in RS5. if err := attachVirtualDisk( handle, @@ -203,6 +212,8 @@ func AttachVirtualDisk(handle syscall.Handle, attachVirtualDiskFlag AttachVirtua // AttachVhd attaches a virtual hard disk at `path` for use. Attaches using version 2 // of the ATTACH_VIRTUAL_DISK_PARAMETERS. +// +//revive:disable-next-line:var-naming VHD, not Vhd func AttachVhd(path string) (err error) { handle, err := OpenVirtualDisk( path, @@ -213,7 +224,7 @@ func AttachVhd(path string) (err error) { return err } - defer syscall.CloseHandle(handle) + defer syscall.CloseHandle(handle) //nolint:errcheck params := AttachVirtualDiskParameters{Version: 2} if err := AttachVirtualDisk( handle, @@ -226,7 +237,11 @@ func AttachVhd(path string) (err error) { } // OpenVirtualDisk obtains a handle to a VHD opened with supplied access mask and flags. -func OpenVirtualDisk(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag) (syscall.Handle, error) { +func OpenVirtualDisk( + vhdPath string, + virtualDiskAccessMask VirtualDiskAccessMask, + openVirtualDiskFlags VirtualDiskFlag, +) (syscall.Handle, error) { parameters := OpenVirtualDiskParameters{Version: 2} handle, err := OpenVirtualDiskWithParameters( vhdPath, @@ -241,7 +256,12 @@ func OpenVirtualDisk(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask } // OpenVirtualDiskWithParameters obtains a handle to a VHD opened with supplied access mask, flags and parameters. -func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask VirtualDiskAccessMask, openVirtualDiskFlags VirtualDiskFlag, parameters *OpenVirtualDiskParameters) (syscall.Handle, error) { +func OpenVirtualDiskWithParameters( + vhdPath string, + virtualDiskAccessMask VirtualDiskAccessMask, + openVirtualDiskFlags VirtualDiskFlag, + parameters *OpenVirtualDiskParameters, +) (syscall.Handle, error) { var ( handle syscall.Handle defaultType VirtualStorageType @@ -279,7 +299,12 @@ func OpenVirtualDiskWithParameters(vhdPath string, virtualDiskAccessMask Virtual } // CreateVirtualDisk creates a virtual harddisk and returns a handle to the disk. -func CreateVirtualDisk(path string, virtualDiskAccessMask VirtualDiskAccessMask, createVirtualDiskFlags CreateVirtualDiskFlag, parameters *CreateVirtualDiskParameters) (syscall.Handle, error) { +func CreateVirtualDisk( + path string, + virtualDiskAccessMask VirtualDiskAccessMask, + createVirtualDiskFlags CreateVirtualDiskFlag, + parameters *CreateVirtualDiskParameters, +) (syscall.Handle, error) { var ( handle syscall.Handle defaultType VirtualStorageType @@ -323,6 +348,8 @@ func GetVirtualDiskPhysicalPath(handle syscall.Handle) (_ string, err error) { } // CreateDiffVhd is a helper function to create a differencing virtual disk. +// +//revive:disable-next-line:var-naming VHD, not Vhd func CreateDiffVhd(diffVhdPath, baseVhdPath string, blockSizeInMB uint32) error { // Setting `ParentPath` is how to signal to create a differencing disk. 
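The changes in this file are formatting, lint directives, and doc comments only, so the exported helpers keep their signatures. A short usage sketch against the signatures visible in this hunk, assuming a Windows build; the path is a placeholder:

package main

import (
	"log"

	"github.com/Microsoft/go-winio/vhd"
)

func main() {
	const path = `C:\temp\example.vhdx` // placeholder path

	// Create a small dynamically expanding VHDX (1 GiB max size, 1 MiB blocks).
	if err := vhd.CreateVhdx(path, 1, 1); err != nil {
		log.Fatalf("create: %v", err)
	}
	// Attach it; AttachVhd uses version 2 of ATTACH_VIRTUAL_DISK_PARAMETERS.
	if err := vhd.AttachVhd(path); err != nil {
		log.Fatalf("attach: %v", err)
	}
	// Detach by path again when done.
	if err := vhd.DetachVhd(path); err != nil {
		log.Fatalf("detach: %v", err)
	}
}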
createParams := &CreateVirtualDiskParameters{ diff --git a/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go b/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go index 1d7498db3beea..d0e917d2be374 100644 --- a/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go +++ b/vendor/github.com/Microsoft/go-winio/vhd/zvhd_windows.go @@ -1,4 +1,6 @@ -// Code generated by 'go generate'; DO NOT EDIT. +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. package vhd diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go index 176ff75e320cf..83f45a1351baf 100644 --- a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go @@ -1,4 +1,6 @@ -// Code generated by 'go generate'; DO NOT EDIT. +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. package winio @@ -47,9 +49,11 @@ var ( procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") + procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") @@ -74,7 +78,6 @@ var ( procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") - procbind = modws2_32.NewProc("bind") ) func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { @@ -123,6 +126,14 @@ func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision return } +func convertStringSidToSid(str *uint16, sid **byte) (err error) { + r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(sid)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func getSecurityDescriptorLength(sd uintptr) (len uint32) { r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) len = uint32(r0) @@ -154,6 +165,14 @@ func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidS return } +func lookupAccountSid(systemName *uint16, sid *byte, name *uint16, nameSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), 
uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { var _p0 *uint16 _p0, err = syscall.UTF16PtrFromString(systemName) @@ -380,25 +399,25 @@ func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err erro return } -func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) { +func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntStatus) { r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) - status = ntstatus(r0) + status = ntStatus(r0) return } -func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) { +func rtlDefaultNpAcl(dacl *uintptr) (status ntStatus) { r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0) - status = ntstatus(r0) + status = ntStatus(r0) return } -func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) { +func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntStatus) { r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0) - status = ntstatus(r0) + status = ntStatus(r0) return } -func rtlNtStatusToDosError(status ntstatus) (winerr error) { +func rtlNtStatusToDosError(status ntStatus) (winerr error) { r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) if r0 != 0 { winerr = syscall.Errno(r0) @@ -417,11 +436,3 @@ func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint } return } - -func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) - if r1 == socketError { - err = errnoErr(e1) - } - return -} diff --git a/vendor/github.com/Microsoft/hcsshim/.gitattributes b/vendor/github.com/Microsoft/hcsshim/.gitattributes index 94f480de94e1d..dd0d09faacf47 100644 --- a/vendor/github.com/Microsoft/hcsshim/.gitattributes +++ b/vendor/github.com/Microsoft/hcsshim/.gitattributes @@ -1 +1,3 @@ -* text=auto eol=lf \ No newline at end of file +* text=auto eol=lf +vendor/** -text +test/vendor/** -text \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/.gitignore b/vendor/github.com/Microsoft/hcsshim/.gitignore index 54ed6f06c9d70..785972e036c25 100644 --- a/vendor/github.com/Microsoft/hcsshim/.gitignore +++ b/vendor/github.com/Microsoft/hcsshim/.gitignore @@ -6,6 +6,7 @@ # Ignore vscode 
setting files .vscode/ +.idea/ # Test binary, build with `go test -c` *.test @@ -23,16 +24,26 @@ service/pkg/ *.img *.vhd *.tar.gz +*.tar # Make stuff .rootfs-done bin/* rootfs/* +rootfs-conv/* *.o /build/ deps/* out/* -.idea/ -.vscode/ \ No newline at end of file +# test results +test/results + +# go workspace files +go.work +go.work.sum + +# keys and related artifacts +*.pem +*.cose diff --git a/vendor/github.com/Microsoft/hcsshim/.golangci.yml b/vendor/github.com/Microsoft/hcsshim/.golangci.yml index 2400e7f1e02de..fdc2a385b7036 100644 --- a/vendor/github.com/Microsoft/hcsshim/.golangci.yml +++ b/vendor/github.com/Microsoft/hcsshim/.golangci.yml @@ -1,23 +1,51 @@ run: timeout: 8m + tests: true + build-tags: + - admin + - functional + - integration + skip-dirs: + # paths are relative to module root + - cri-containerd/test-images linters: enable: - - stylecheck + # defaults: + # - errcheck + # - gosimple + # - govet + # - ineffassign + # - staticcheck + # - typecheck + # - unused + + - gofmt # whether code was gofmt-ed + - nolintlint # ill-formed or insufficient nolint directives + - stylecheck # golint replacement + - thelper # test helpers without t.Helper() linters-settings: stylecheck: # https://staticcheck.io/docs/checks checks: ["all"] - issues: - # This repo has a LOT of generated schema files, operating system bindings, and other things that ST1003 from stylecheck won't like - # (screaming case Windows api constants for example). There's also some structs that we *could* change the initialisms to be Go - # friendly (Id -> ID) but they're exported and it would be a breaking change. This makes it so that most new code, code that isn't - # supposed to be a pretty faithful mapping to an OS call/constants, or non-generated code still checks if we're following idioms, - # while ignoring the things that are just noise or would be more of a hassle than it'd be worth to change. exclude-rules: + # path is relative to module root, which is ./test/ + - path: cri-containerd + linters: + - stylecheck + text: "^ST1003: should not use underscores in package names$" + source: "^package cri_containerd$" + + # This repo has a LOT of generated schema files, operating system bindings, and other + # things that ST1003 from stylecheck won't like (screaming case Windows api constants for example). + # There's also some structs that we *could* change the initialisms to be Go friendly + # (Id -> ID) but they're exported and it would be a breaking change. + # This makes it so that most new code, code that isn't supposed to be a pretty faithful + # mapping to an OS call/constants, or non-generated code still checks if we're following idioms, + # while ignoring the things that are just noise or would be more of a hassle than it'd be worth to change. 
- path: layer.go linters: - stylecheck @@ -28,11 +56,21 @@ issues: - stylecheck Text: "ST1003:" - - path: internal\\hcs\\schema2\\ + - path: cmd\\ncproxy\\nodenetsvc\\ + linters: + - stylecheck + Text: "ST1003:" + + - path: cmd\\ncproxy_mock\\ linters: - stylecheck Text: "ST1003:" + - path: internal\\hcs\\schema2\\ + linters: + - stylecheck + - gofmt + - path: internal\\wclayer\\ linters: - stylecheck @@ -96,4 +134,4 @@ issues: - path: internal\\hcserror\\ linters: - stylecheck - Text: "ST1003:" \ No newline at end of file + Text: "ST1003:" diff --git a/vendor/github.com/Microsoft/hcsshim/Makefile b/vendor/github.com/Microsoft/hcsshim/Makefile index a8f5516cd0b1b..742c76d848ce7 100644 --- a/vendor/github.com/Microsoft/hcsshim/Makefile +++ b/vendor/github.com/Microsoft/hcsshim/Makefile @@ -1,4 +1,5 @@ BASE:=base.tar.gz +DEV_BUILD:=0 GO:=go GO_FLAGS:=-ldflags "-s -w" # strip Go binaries @@ -12,16 +13,31 @@ GO_FLAGS_EXTRA:= ifeq "$(GOMODVENDOR)" "1" GO_FLAGS_EXTRA += -mod=vendor endif +GO_BUILD_TAGS:= +ifneq ($(strip $(GO_BUILD_TAGS)),) +GO_FLAGS_EXTRA += -tags="$(GO_BUILD_TAGS)" +endif GO_BUILD:=CGO_ENABLED=$(CGO_ENABLED) $(GO) build $(GO_FLAGS) $(GO_FLAGS_EXTRA) SRCROOT=$(dir $(abspath $(firstword $(MAKEFILE_LIST)))) +# additional directories to search for rule prerequisites and targets +VPATH=$(SRCROOT) + +DELTA_TARGET=out/delta.tar.gz + +ifeq "$(DEV_BUILD)" "1" +DELTA_TARGET=out/delta-dev.tar.gz +endif # The link aliases for gcstools GCS_TOOLS=\ - generichook + generichook \ + install-drivers .PHONY: all always rootfs test +.DEFAULT_GOAL := all + all: out/initrd.img out/rootfs.tar.gz clean: @@ -29,21 +45,13 @@ clean: rm -rf bin deps rootfs out test: - cd $(SRCROOT) && go test -v ./internal/guest/... + cd $(SRCROOT) && $(GO) test -v ./internal/guest/... -out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools Makefile - @mkdir -p out - rm -rf rootfs - mkdir -p rootfs/bin/ - cp bin/init rootfs/ - cp bin/vsockexec rootfs/bin/ - cp bin/cmd/gcs rootfs/bin/ - cp bin/cmd/gcstools rootfs/bin/ - for tool in $(GCS_TOOLS); do ln -s gcstools rootfs/bin/$$tool; done - git -C $(SRCROOT) rev-parse HEAD > rootfs/gcs.commit && \ - git -C $(SRCROOT) rev-parse --abbrev-ref HEAD > rootfs/gcs.branch - tar -zcf $@ -C rootfs . - rm -rf rootfs +rootfs: out/rootfs.vhd + +out/rootfs.vhd: out/rootfs.tar.gz bin/cmd/tar2ext4 + gzip -f -d ./out/rootfs.tar.gz + bin/cmd/tar2ext4 -vhd -i ./out/rootfs.tar -o $@ out/rootfs.tar.gz: out/initrd.img rm -rf rootfs-conv @@ -52,13 +60,45 @@ out/rootfs.tar.gz: out/initrd.img tar -zcf $@ -C rootfs-conv . rm -rf rootfs-conv -out/initrd.img: $(BASE) out/delta.tar.gz $(SRCROOT)/hack/catcpio.sh - $(SRCROOT)/hack/catcpio.sh "$(BASE)" out/delta.tar.gz > out/initrd.img.uncompressed +out/initrd.img: $(BASE) $(DELTA_TARGET) $(SRCROOT)/hack/catcpio.sh + $(SRCROOT)/hack/catcpio.sh "$(BASE)" $(DELTA_TARGET) > out/initrd.img.uncompressed gzip -c out/initrd.img.uncompressed > $@ rm out/initrd.img.uncompressed +# This target includes utilities which may be useful for testing purposes. +out/delta-dev.tar.gz: out/delta.tar.gz bin/internal/tools/snp-report + rm -rf rootfs-dev + mkdir rootfs-dev + tar -xzf out/delta.tar.gz -C rootfs-dev + cp bin/internal/tools/snp-report rootfs-dev/bin/ + tar -zcf $@ -C rootfs-dev . 
+ rm -rf rootfs-dev + +out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths Makefile + @mkdir -p out + rm -rf rootfs + mkdir -p rootfs/bin/ + mkdir -p rootfs/info/ + cp bin/init rootfs/ + cp bin/vsockexec rootfs/bin/ + cp bin/cmd/gcs rootfs/bin/ + cp bin/cmd/gcstools rootfs/bin/ + cp bin/cmd/hooks/wait-paths rootfs/bin/ + for tool in $(GCS_TOOLS); do ln -s gcstools rootfs/bin/$$tool; done + git -C $(SRCROOT) rev-parse HEAD > rootfs/info/gcs.commit && \ + git -C $(SRCROOT) rev-parse --abbrev-ref HEAD > rootfs/info/gcs.branch && \ + date --iso-8601=minute --utc > rootfs/info/tar.date + $(if $(and $(realpath $(subst .tar,.testdata.json,$(BASE))), $(shell which jq)), \ + jq -r '.IMAGE_NAME' $(subst .tar,.testdata.json,$(BASE)) 2>/dev/null > rootfs/info/image.name && \ + jq -r '.DATETIME' $(subst .tar,.testdata.json,$(BASE)) 2>/dev/null > rootfs/info/build.date) + tar -zcf $@ -C rootfs . + rm -rf rootfs + -include deps/cmd/gcs.gomake -include deps/cmd/gcstools.gomake +-include deps/cmd/hooks/wait-paths.gomake +-include deps/cmd/tar2ext4.gomake +-include deps/internal/tools/snp-report.gomake # Implicit rule for includes that define Go targets. %.gomake: $(SRCROOT)/Makefile @@ -72,8 +112,6 @@ out/initrd.img: $(BASE) out/delta.tar.gz $(SRCROOT)/hack/catcpio.sh @/bin/echo -e '-include $(@:%.gomake=%.godeps)' >> $@.new mv $@.new $@ -VPATH=$(SRCROOT) - bin/vsockexec: vsockexec/vsockexec.o vsockexec/vsock.o @mkdir -p bin $(CC) $(LDFLAGS) -o $@ $^ diff --git a/vendor/github.com/Microsoft/hcsshim/Protobuild.toml b/vendor/github.com/Microsoft/hcsshim/Protobuild.toml index ee18671aa6407..471f133866253 100644 --- a/vendor/github.com/Microsoft/hcsshim/Protobuild.toml +++ b/vendor/github.com/Microsoft/hcsshim/Protobuild.toml @@ -1,4 +1,4 @@ -version = "unstable" +version = "1" generator = "gogoctrd" plugins = ["grpc", "fieldpath"] @@ -14,11 +14,6 @@ plugins = ["grpc", "fieldpath"] # target package. packages = ["github.com/gogo/protobuf"] - # Paths that will be added untouched to the end of the includes. We use - # `/usr/local/include` to pickup the common install location of protobuf. - # This is the default. - after = ["/usr/local/include"] - # This section maps protobuf imports to Go packages. These will become # `-M` directives in the call to the go protobuf generator. [packages] @@ -36,6 +31,10 @@ plugins = ["grpc", "fieldpath"] prefixes = ["github.com/Microsoft/hcsshim/internal/shimdiag"] plugins = ["ttrpc"] +[[overrides]] +prefixes = ["github.com/Microsoft/hcsshim/internal/extendedtask"] +plugins = ["ttrpc"] + [[overrides]] prefixes = ["github.com/Microsoft/hcsshim/internal/computeagent"] plugins = ["ttrpc"] diff --git a/vendor/github.com/Microsoft/hcsshim/README.md b/vendor/github.com/Microsoft/hcsshim/README.md index b8ca926a9da59..5a1361539bea2 100644 --- a/vendor/github.com/Microsoft/hcsshim/README.md +++ b/vendor/github.com/Microsoft/hcsshim/README.md @@ -75,24 +75,6 @@ certify they either authored the work themselves or otherwise have permission to more info, as well as to make sure that you can attest to the rules listed. Our CI uses the [DCO Github app](https://github.com/apps/dco) to ensure that all commits in a given PR are signed-off. -### Test Directory (Important to note) - -This project has tried to trim some dependencies from the root Go modules file that would be cumbersome to get transitively included if this -project is being vendored/used as a library. 
Some of these dependencies were only being used for tests, so the /test directory in this project also has -its own go.mod file where these are now included to get around this issue. Our tests rely on the code in this project to run, so the test Go modules file -has a relative path replace directive to pull in the latest hcsshim code that the tests actually touch from this project -(which is the repo itself on your disk). - -``` -replace ( - github.com/Microsoft/hcsshim => ../ -) -``` - -Because of this, for most code changes you may need to run `go mod vendor` + `go mod tidy` in the /test directory in this repository, as the -CI in this project will check if the files are out of date and will fail if this is true. - - ## Code of Conduct This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). @@ -101,7 +83,7 @@ contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additio ## Dependencies -This project requires Golang 1.9 or newer to build. +This project requires Golang 1.17 or newer to build. For system requirements to run this project, see the Microsoft docs on [Windows Container requirements](https://docs.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/system-requirements). diff --git a/vendor/github.com/Microsoft/hcsshim/SECURITY.md b/vendor/github.com/Microsoft/hcsshim/SECURITY.md new file mode 100644 index 0000000000000..869fdfe2b2469 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/SECURITY.md @@ -0,0 +1,41 @@ + + +## Security + +Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). + +If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. + +## Reporting Security Issues + +**Please do not report security vulnerabilities through public GitHub issues.** + +Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). + +If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). + +You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). + +Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: + + * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) 
+ * Full paths of source file(s) related to the manifestation of the issue + * The location of the affected source code (tag/branch/commit or direct URL) + * Any special configuration required to reproduce the issue + * Step-by-step instructions to reproduce the issue + * Proof-of-concept or exploit code (if possible) + * Impact of the issue, including how an attacker might exploit the issue + +This information will help us triage your report more quickly. + +If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. + +## Preferred Languages + +We prefer all communications to be in English. + +## Policy + +Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). + + diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.pb.go b/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.pb.go index 89aff3723a5db..6d35b9ca89c27 100644 --- a/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.pb.go +++ b/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.pb.go @@ -6,10 +6,12 @@ package options import ( fmt "fmt" proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" _ "github.com/gogo/protobuf/types" github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" io "io" math "math" + math_bits "math/bits" reflect "reflect" strings "strings" time "time" @@ -25,7 +27,7 @@ var _ = time.Kitchen // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package type Options_DebugType int32 @@ -143,7 +145,14 @@ type Options struct { // io_retry_timeout_in_sec is the timeout in seconds for how long to try and reconnect to an upstream IO provider if a connection is lost. // The typical example is if Containerd has restarted but is expected to come back online. A 0 for this field is interpreted as an infinite // timeout. - IoRetryTimeoutInSec int32 `protobuf:"varint,17,opt,name=io_retry_timeout_in_sec,json=ioRetryTimeoutInSec,proto3" json:"io_retry_timeout_in_sec,omitempty"` + IoRetryTimeoutInSec int32 `protobuf:"varint,17,opt,name=io_retry_timeout_in_sec,json=ioRetryTimeoutInSec,proto3" json:"io_retry_timeout_in_sec,omitempty"` + // default_container_annotations specifies a set of annotations that should be set for every workload container + DefaultContainerAnnotations map[string]string `protobuf:"bytes,18,rep,name=default_container_annotations,json=defaultContainerAnnotations,proto3" json:"default_container_annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // no_inherit_host_timezone specifies to skip inheriting the hosts time zone for WCOW UVMs and instead default to + // UTC. 
+ NoInheritHostTimezone bool `protobuf:"varint,19,opt,name=no_inherit_host_timezone,json=noInheritHostTimezone,proto3" json:"no_inherit_host_timezone,omitempty"` + // scrub_logs enables removing environment variables and other potentially sensitive information from logs + ScrubLogs bool `protobuf:"varint,20,opt,name=scrub_logs,json=scrubLogs,proto3" json:"scrub_logs,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -162,7 +171,7 @@ func (m *Options) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { return xxx_messageInfo_Options.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -211,7 +220,7 @@ func (m *ProcessDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro return xxx_messageInfo_ProcessDetails.Marshal(b, m, deterministic) } else { b = b[:cap(b)] - n, err := m.MarshalTo(b) + n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } @@ -234,6 +243,7 @@ func init() { proto.RegisterEnum("containerd.runhcs.v1.Options_DebugType", Options_DebugType_name, Options_DebugType_value) proto.RegisterEnum("containerd.runhcs.v1.Options_SandboxIsolation", Options_SandboxIsolation_name, Options_SandboxIsolation_value) proto.RegisterType((*Options)(nil), "containerd.runhcs.v1.Options") + proto.RegisterMapType((map[string]string)(nil), "containerd.runhcs.v1.Options.DefaultContainerAnnotationsEntry") proto.RegisterType((*ProcessDetails)(nil), "containerd.runhcs.v1.ProcessDetails") } @@ -242,73 +252,80 @@ func init() { } var fileDescriptor_b643df6839c75082 = []byte{ - // 953 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x5d, 0x6f, 0xdb, 0x36, - 0x17, 0xb6, 0xda, 0x24, 0xb6, 0x4e, 0xbe, 0x1c, 0x36, 0x40, 0x85, 0xe4, 0xad, 0x6d, 0xa4, 0x2f, - 0xd0, 0x14, 0x6b, 0xa4, 0xa4, 0xdb, 0xdd, 0x06, 0x0c, 0x8d, 0xed, 0xb4, 0x1e, 0xf2, 0x61, 0xc8, - 0x59, 0xba, 0x8f, 0x0b, 0x42, 0x1f, 0x8c, 0x4c, 0x54, 0x12, 0x05, 0x92, 0xf6, 0xe2, 0x5e, 0xed, - 0x27, 0xec, 0x87, 0xec, 0x87, 0xe4, 0x72, 0x97, 0x03, 0x06, 0x64, 0xab, 0x7f, 0xc9, 0x40, 0x8a, - 0x4a, 0xbb, 0x20, 0xd8, 0xcd, 0xae, 0x4c, 0x3e, 0xcf, 0xc3, 0x87, 0xe7, 0x1c, 0x9d, 0x43, 0xc3, - 0x59, 0x42, 0xe5, 0x78, 0x12, 0xba, 0x11, 0xcb, 0xbc, 0x13, 0x1a, 0x71, 0x26, 0xd8, 0xa5, 0xf4, - 0xc6, 0x91, 0x10, 0x63, 0x9a, 0x79, 0x51, 0x16, 0x7b, 0x11, 0xcb, 0x65, 0x40, 0x73, 0xc2, 0xe3, - 0x3d, 0x85, 0xed, 0xf1, 0x49, 0x3e, 0x8e, 0xc4, 0xde, 0xf4, 0xc0, 0x63, 0x85, 0xa4, 0x2c, 0x17, - 0x5e, 0x89, 0xb8, 0x05, 0x67, 0x92, 0xa1, 0xcd, 0x8f, 0x7a, 0xd7, 0x10, 0xd3, 0x83, 0xad, 0xcd, - 0x84, 0x25, 0x4c, 0x0b, 0x3c, 0xb5, 0x2a, 0xb5, 0x5b, 0xed, 0x84, 0xb1, 0x24, 0x25, 0x9e, 0xde, - 0x85, 0x93, 0x4b, 0x4f, 0xd2, 0x8c, 0x08, 0x19, 0x64, 0x45, 0x29, 0xd8, 0xf9, 0xb5, 0x0e, 0xf5, - 0xb3, 0xf2, 0x16, 0xb4, 0x09, 0x8b, 0x31, 0x09, 0x27, 0x89, 0x63, 0x75, 0xac, 0xdd, 0x86, 0x5f, - 0x6e, 0xd0, 0x11, 0x80, 0x5e, 0x60, 0x39, 0x2b, 0x88, 0xf3, 0xa0, 0x63, 0xed, 0xae, 0xbd, 0x7c, - 0xe6, 0xde, 0x17, 0x83, 0x6b, 0x8c, 0xdc, 0x9e, 0xd2, 0x9f, 0xcf, 0x0a, 0xe2, 0xdb, 0x71, 0xb5, - 0x44, 0x4f, 0x61, 0x95, 0x93, 0x84, 0x0a, 0xc9, 0x67, 0x98, 0x33, 0x26, 0x9d, 0x87, 0x1d, 0x6b, - 0xd7, 0xf6, 0x57, 0x2a, 0xd0, 0x67, 0x4c, 0x2a, 0x91, 0x08, 0xf2, 0x38, 0x64, 0x57, 0x98, 0x66, - 0x41, 0x42, 0x9c, 0x85, 0x52, 0x64, 0xc0, 0x81, 0xc2, 0xd0, 0x73, 0x68, 0x56, 0xa2, 0x22, 0x0d, - 0xe4, 0x25, 0xe3, 0x99, 0xb3, 0xa8, 0x75, 0xeb, 0x06, 
0x1f, 0x1a, 0x18, 0xfd, 0x08, 0x1b, 0xb7, - 0x7e, 0x82, 0xa5, 0x81, 0x8a, 0xcf, 0x59, 0xd2, 0x39, 0xb8, 0xff, 0x9e, 0xc3, 0xc8, 0xdc, 0x58, - 0x9d, 0xf2, 0xab, 0x3b, 0x6f, 0x11, 0xe4, 0xc1, 0x66, 0xc8, 0x98, 0xc4, 0x97, 0x34, 0x25, 0x42, - 0xe7, 0x84, 0x8b, 0x40, 0x8e, 0x9d, 0xba, 0x8e, 0x65, 0x43, 0x71, 0x47, 0x8a, 0x52, 0x99, 0x0d, - 0x03, 0x39, 0x46, 0x2f, 0x00, 0x4d, 0x33, 0x5c, 0x70, 0x16, 0x11, 0x21, 0x18, 0xc7, 0x11, 0x9b, - 0xe4, 0xd2, 0x69, 0x74, 0xac, 0xdd, 0x45, 0xbf, 0x39, 0xcd, 0x86, 0x15, 0xd1, 0x55, 0x38, 0x72, - 0x61, 0x73, 0x9a, 0xe1, 0x8c, 0x64, 0x8c, 0xcf, 0xb0, 0xa0, 0xef, 0x09, 0xa6, 0x39, 0xce, 0x42, - 0xc7, 0xae, 0xf4, 0x27, 0x9a, 0x1a, 0xd1, 0xf7, 0x64, 0x90, 0x9f, 0x84, 0xa8, 0x05, 0xf0, 0x7a, - 0xf8, 0xed, 0xc5, 0x9b, 0x9e, 0xba, 0xcb, 0x01, 0x1d, 0xc4, 0x27, 0x08, 0xfa, 0x0a, 0xb6, 0x45, - 0x14, 0xa4, 0x04, 0x47, 0xc5, 0x04, 0xa7, 0x34, 0xa3, 0x52, 0x60, 0xc9, 0xb0, 0x49, 0xcb, 0x59, - 0xd6, 0x1f, 0xfd, 0xb1, 0x96, 0x74, 0x8b, 0xc9, 0xb1, 0x16, 0x9c, 0x33, 0x53, 0x07, 0x74, 0x02, - 0xff, 0x8f, 0xc9, 0x65, 0x30, 0x49, 0x25, 0xbe, 0xad, 0x1b, 0x16, 0x11, 0x0f, 0x64, 0x34, 0xbe, - 0x8d, 0x2e, 0x09, 0x9d, 0x15, 0x1d, 0x5d, 0xdb, 0x68, 0xbb, 0x95, 0x74, 0x54, 0x2a, 0xcb, 0x60, - 0x5f, 0x87, 0xe8, 0x6b, 0x78, 0x52, 0xd9, 0x4d, 0xb3, 0xfb, 0x7c, 0x56, 0xb5, 0x8f, 0x63, 0x44, - 0x17, 0xd9, 0x5d, 0x03, 0xd5, 0x29, 0xe3, 0x80, 0x93, 0xea, 0xac, 0xb3, 0xa6, 0xe3, 0x5f, 0xd1, - 0xa0, 0x11, 0xa3, 0x0e, 0x2c, 0x9f, 0x76, 0x87, 0x9c, 0x5d, 0xcd, 0x5e, 0xc5, 0x31, 0x77, 0xd6, - 0x75, 0x4d, 0x3e, 0x85, 0xd0, 0x36, 0xd8, 0x29, 0x4b, 0x70, 0x4a, 0xa6, 0x24, 0x75, 0x9a, 0x9a, - 0x6f, 0xa4, 0x2c, 0x39, 0x56, 0x7b, 0xf4, 0x05, 0x3c, 0xa6, 0x0c, 0x73, 0xa2, 0x5a, 0x56, 0x0d, - 0x0e, 0x9b, 0x48, 0x15, 0x9d, 0x20, 0x91, 0xb3, 0xa1, 0xc3, 0x7b, 0x44, 0x99, 0xaf, 0xd8, 0xf3, - 0x92, 0x1c, 0xe4, 0x23, 0x12, 0xed, 0x3c, 0x07, 0xfb, 0x76, 0x00, 0x90, 0x0d, 0x8b, 0xa7, 0xc3, - 0xc1, 0xb0, 0xdf, 0xac, 0xa1, 0x06, 0x2c, 0x1c, 0x0d, 0x8e, 0xfb, 0x4d, 0x0b, 0xd5, 0xe1, 0x61, - 0xff, 0xfc, 0x6d, 0xf3, 0xc1, 0x8e, 0x07, 0xcd, 0xbb, 0x7d, 0x86, 0x96, 0xa1, 0x3e, 0xf4, 0xcf, - 0xba, 0xfd, 0xd1, 0xa8, 0x59, 0x43, 0x6b, 0x00, 0x6f, 0xbe, 0x1f, 0xf6, 0xfd, 0x8b, 0xc1, 0xe8, - 0xcc, 0x6f, 0x5a, 0x3b, 0x7f, 0x3c, 0x84, 0x35, 0xd3, 0x26, 0x3d, 0x22, 0x03, 0x9a, 0x0a, 0xf4, - 0x04, 0x40, 0x8f, 0x0a, 0xce, 0x83, 0x8c, 0xe8, 0xd1, 0xb5, 0x7d, 0x5b, 0x23, 0xa7, 0x41, 0x46, - 0x50, 0x17, 0x20, 0xe2, 0x24, 0x90, 0x24, 0xc6, 0x81, 0xd4, 0xe3, 0xbb, 0xfc, 0x72, 0xcb, 0x2d, - 0x9f, 0x05, 0xb7, 0x7a, 0x16, 0xdc, 0xf3, 0xea, 0x59, 0x38, 0x6c, 0x5c, 0xdf, 0xb4, 0x6b, 0xbf, - 0xfc, 0xd9, 0xb6, 0x7c, 0xdb, 0x9c, 0x7b, 0x25, 0xd1, 0x67, 0x80, 0xde, 0x11, 0x9e, 0x93, 0x54, - 0x97, 0x01, 0x1f, 0xec, 0xef, 0xe3, 0x5c, 0xe8, 0x01, 0x5e, 0xf0, 0xd7, 0x4b, 0x46, 0x39, 0x1c, - 0xec, 0xef, 0x9f, 0x0a, 0xe4, 0xc2, 0x23, 0xd3, 0xb4, 0x11, 0xcb, 0x32, 0x2a, 0x71, 0x38, 0x93, - 0x44, 0xe8, 0x49, 0x5e, 0xf0, 0x37, 0x4a, 0xaa, 0xab, 0x99, 0x43, 0x45, 0xa0, 0x23, 0xe8, 0x18, - 0xfd, 0x4f, 0x8c, 0xbf, 0xa3, 0x79, 0x82, 0x05, 0x91, 0xb8, 0xe0, 0x74, 0x1a, 0x48, 0x62, 0x0e, - 0x2f, 0xea, 0xc3, 0xff, 0x2b, 0x75, 0x6f, 0x4b, 0xd9, 0x88, 0xc8, 0x61, 0x29, 0x2a, 0x7d, 0x7a, - 0xd0, 0xbe, 0xc7, 0x47, 0xf7, 0x43, 0x6c, 0x6c, 0x96, 0xb4, 0xcd, 0xf6, 0x5d, 0x9b, 0x91, 0xd6, - 0x94, 0x2e, 0x2f, 0x00, 0xcc, 0x80, 0x62, 0x1a, 0xeb, 0x51, 0x5e, 0x3d, 0x5c, 0x9d, 0xdf, 0xb4, - 0x6d, 0x53, 0xf6, 0x41, 0xcf, 0xb7, 0x8d, 0x60, 0x10, 0xa3, 0x67, 0xd0, 0x9c, 0x08, 0xc2, 0xff, - 0x51, 0x96, 0x86, 0xbe, 0x64, 0x55, 0xe1, 0x1f, 0x8b, 0xf2, 0x14, 0xea, 0xe4, 
0x8a, 0x44, 0xca, - 0x53, 0xcd, 0xaf, 0x7d, 0x08, 0xf3, 0x9b, 0xf6, 0x52, 0xff, 0x8a, 0x44, 0x83, 0x9e, 0xbf, 0xa4, - 0xa8, 0x41, 0x7c, 0x18, 0x5f, 0x7f, 0x68, 0xd5, 0x7e, 0xff, 0xd0, 0xaa, 0xfd, 0x3c, 0x6f, 0x59, - 0xd7, 0xf3, 0x96, 0xf5, 0xdb, 0xbc, 0x65, 0xfd, 0x35, 0x6f, 0x59, 0x3f, 0x7c, 0xf3, 0xdf, 0xff, - 0x44, 0xbe, 0x34, 0xbf, 0xdf, 0xd5, 0xc2, 0x25, 0xfd, 0xdd, 0x3f, 0xff, 0x3b, 0x00, 0x00, 0xff, - 0xff, 0x6b, 0x83, 0xa6, 0x5f, 0x9b, 0x06, 0x00, 0x00, + // 1072 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x4b, 0x6f, 0xe3, 0x36, + 0x17, 0xb5, 0xf2, 0xb4, 0x98, 0x97, 0xc3, 0xf8, 0xc3, 0x08, 0xc9, 0x37, 0xb6, 0x91, 0x29, 0x30, + 0x19, 0x74, 0x22, 0x27, 0x69, 0x81, 0x16, 0x6d, 0xd1, 0x22, 0xb1, 0x9d, 0x89, 0x8b, 0x3c, 0x0c, + 0xd9, 0xcd, 0xf4, 0xb1, 0x20, 0xf4, 0x60, 0x64, 0x22, 0x92, 0x28, 0x90, 0x94, 0x1b, 0x67, 0x55, + 0xf4, 0x17, 0xf4, 0x67, 0x65, 0xd9, 0x65, 0x8b, 0x02, 0x69, 0xc7, 0xbf, 0xa4, 0x20, 0x45, 0x25, + 0x33, 0x41, 0xda, 0x59, 0x74, 0x65, 0xf2, 0x9c, 0xc3, 0xc3, 0x7b, 0xaf, 0x78, 0xaf, 0xc1, 0x59, + 0x48, 0xc4, 0x30, 0xf3, 0x6c, 0x9f, 0xc6, 0xcd, 0x13, 0xe2, 0x33, 0xca, 0xe9, 0x85, 0x68, 0x0e, + 0x7d, 0xce, 0x87, 0x24, 0x6e, 0xfa, 0x71, 0xd0, 0xf4, 0x69, 0x22, 0x5c, 0x92, 0x60, 0x16, 0x6c, + 0x4b, 0x6c, 0x9b, 0x65, 0xc9, 0xd0, 0xe7, 0xdb, 0xa3, 0xdd, 0x26, 0x4d, 0x05, 0xa1, 0x09, 0x6f, + 0xe6, 0x88, 0x9d, 0x32, 0x2a, 0x28, 0xac, 0xde, 0xeb, 0x6d, 0x4d, 0x8c, 0x76, 0xd7, 0xab, 0x21, + 0x0d, 0xa9, 0x12, 0x34, 0xe5, 0x2a, 0xd7, 0xae, 0xd7, 0x43, 0x4a, 0xc3, 0x08, 0x37, 0xd5, 0xce, + 0xcb, 0x2e, 0x9a, 0x82, 0xc4, 0x98, 0x0b, 0x37, 0x4e, 0x73, 0xc1, 0xe6, 0xef, 0x26, 0x98, 0x3f, + 0xcb, 0x6f, 0x81, 0x55, 0x30, 0x1b, 0x60, 0x2f, 0x0b, 0x2d, 0xa3, 0x61, 0x6c, 0x95, 0x9d, 0x7c, + 0x03, 0x0f, 0x01, 0x50, 0x0b, 0x24, 0xc6, 0x29, 0xb6, 0xa6, 0x1a, 0xc6, 0xd6, 0xf2, 0xde, 0x73, + 0xfb, 0xb1, 0x18, 0x6c, 0x6d, 0x64, 0xb7, 0xa5, 0x7e, 0x30, 0x4e, 0xb1, 0x63, 0x06, 0xc5, 0x12, + 0x3e, 0x03, 0x4b, 0x0c, 0x87, 0x84, 0x0b, 0x36, 0x46, 0x8c, 0x52, 0x61, 0x4d, 0x37, 0x8c, 0x2d, + 0xd3, 0x59, 0x2c, 0x40, 0x87, 0x52, 0x21, 0x45, 0xdc, 0x4d, 0x02, 0x8f, 0x5e, 0x21, 0x12, 0xbb, + 0x21, 0xb6, 0x66, 0x72, 0x91, 0x06, 0xbb, 0x12, 0x83, 0x2f, 0x40, 0xa5, 0x10, 0xa5, 0x91, 0x2b, + 0x2e, 0x28, 0x8b, 0xad, 0x59, 0xa5, 0x5b, 0xd1, 0x78, 0x4f, 0xc3, 0xf0, 0x07, 0xb0, 0x7a, 0xe7, + 0xc7, 0x69, 0xe4, 0xca, 0xf8, 0xac, 0x39, 0x95, 0x83, 0xfd, 0xef, 0x39, 0xf4, 0xf5, 0x8d, 0xc5, + 0x29, 0xa7, 0xb8, 0xf3, 0x0e, 0x81, 0x4d, 0x50, 0xf5, 0x28, 0x15, 0xe8, 0x82, 0x44, 0x98, 0xab, + 0x9c, 0x50, 0xea, 0x8a, 0xa1, 0x35, 0xaf, 0x62, 0x59, 0x95, 0xdc, 0xa1, 0xa4, 0x64, 0x66, 0x3d, + 0x57, 0x0c, 0xe1, 0x4b, 0x00, 0x47, 0x31, 0x4a, 0x19, 0xf5, 0x31, 0xe7, 0x94, 0x21, 0x9f, 0x66, + 0x89, 0xb0, 0xca, 0x0d, 0x63, 0x6b, 0xd6, 0xa9, 0x8c, 0xe2, 0x5e, 0x41, 0xb4, 0x24, 0x0e, 0x6d, + 0x50, 0x1d, 0xc5, 0x28, 0xc6, 0x31, 0x65, 0x63, 0xc4, 0xc9, 0x35, 0x46, 0x24, 0x41, 0xb1, 0x67, + 0x99, 0x85, 0xfe, 0x44, 0x51, 0x7d, 0x72, 0x8d, 0xbb, 0xc9, 0x89, 0x07, 0x6b, 0x00, 0xbc, 0xea, + 0x7d, 0x73, 0x7e, 0xd4, 0x96, 0x77, 0x59, 0x40, 0x05, 0xf1, 0x16, 0x02, 0xbf, 0x00, 0x1b, 0xdc, + 0x77, 0x23, 0x8c, 0xfc, 0x34, 0x43, 0x11, 0x89, 0x89, 0xe0, 0x48, 0x50, 0xa4, 0xd3, 0xb2, 0x16, + 0xd4, 0x47, 0x7f, 0xa2, 0x24, 0xad, 0x34, 0x3b, 0x56, 0x82, 0x01, 0xd5, 0x75, 0x80, 0x27, 0xe0, + 0x83, 0x00, 0x5f, 0xb8, 0x59, 0x24, 0xd0, 0x5d, 0xdd, 0x10, 0xf7, 0x99, 0x2b, 0xfc, 0xe1, 0x5d, + 0x74, 0xa1, 0x67, 0x2d, 0xaa, 0xe8, 0xea, 0x5a, 0xdb, 0x2a, 0xa4, 0xfd, 0x5c, 0x99, 0x07, 0xfb, + 
0xca, 0x83, 0x5f, 0x81, 0xa7, 0x85, 0xdd, 0x28, 0x7e, 0xcc, 0x67, 0x49, 0xf9, 0x58, 0x5a, 0x74, + 0x1e, 0x3f, 0x34, 0x90, 0x2f, 0x65, 0xe8, 0x32, 0x5c, 0x9c, 0xb5, 0x96, 0x55, 0xfc, 0x8b, 0x0a, + 0xd4, 0x62, 0xd8, 0x00, 0x0b, 0xa7, 0xad, 0x1e, 0xa3, 0x57, 0xe3, 0xfd, 0x20, 0x60, 0xd6, 0x8a, + 0xaa, 0xc9, 0xdb, 0x10, 0xdc, 0x00, 0x66, 0x44, 0x43, 0x14, 0xe1, 0x11, 0x8e, 0xac, 0x8a, 0xe2, + 0xcb, 0x11, 0x0d, 0x8f, 0xe5, 0x1e, 0x7e, 0x0c, 0x9e, 0x10, 0x8a, 0x18, 0x96, 0x4f, 0x56, 0x36, + 0x0e, 0xcd, 0x84, 0x8c, 0x8e, 0x63, 0xdf, 0x5a, 0x55, 0xe1, 0xad, 0x11, 0xea, 0x48, 0x76, 0x90, + 0x93, 0xdd, 0xa4, 0x8f, 0x7d, 0xf8, 0xb3, 0x71, 0x9f, 0xdb, 0x7d, 0xa9, 0xdc, 0x24, 0xa1, 0x42, + 0xbd, 0x1b, 0x6e, 0xc1, 0xc6, 0xf4, 0xd6, 0xc2, 0xde, 0x97, 0xef, 0x6b, 0xa2, 0x77, 0x2b, 0xb8, + 0x7f, 0x6f, 0xd0, 0x49, 0x64, 0xbf, 0x6c, 0x04, 0xff, 0xac, 0x80, 0x9f, 0x00, 0x2b, 0xa1, 0x88, + 0x24, 0x43, 0xcc, 0x88, 0x40, 0x43, 0xca, 0x85, 0xca, 0xe0, 0x9a, 0x26, 0xd8, 0x5a, 0x53, 0x95, + 0xfa, 0x5f, 0x42, 0xbb, 0x39, 0x7d, 0x44, 0xb9, 0x18, 0x68, 0x12, 0x3e, 0x05, 0x80, 0xfb, 0x2c, + 0xf3, 0x50, 0x44, 0x43, 0x6e, 0x55, 0x95, 0xd4, 0x54, 0xc8, 0x31, 0x0d, 0xf9, 0xfa, 0x29, 0x68, + 0xbc, 0x2f, 0x30, 0x58, 0x01, 0xd3, 0x97, 0x78, 0xac, 0xa6, 0x88, 0xe9, 0xc8, 0xa5, 0x9c, 0x2c, + 0x23, 0x37, 0xca, 0xf2, 0xf1, 0x61, 0x3a, 0xf9, 0xe6, 0xb3, 0xa9, 0x4f, 0x8d, 0xcd, 0x17, 0xc0, + 0xbc, 0x9b, 0x16, 0xd0, 0x04, 0xb3, 0xa7, 0xbd, 0x6e, 0xaf, 0x53, 0x29, 0xc1, 0x32, 0x98, 0x39, + 0xec, 0x1e, 0x77, 0x2a, 0x06, 0x9c, 0x07, 0xd3, 0x9d, 0xc1, 0xeb, 0xca, 0xd4, 0x66, 0x13, 0x54, + 0x1e, 0x36, 0x25, 0x5c, 0x00, 0xf3, 0x3d, 0xe7, 0xac, 0xd5, 0xe9, 0xf7, 0x2b, 0x25, 0xb8, 0x0c, + 0xc0, 0xd1, 0x77, 0xbd, 0x8e, 0x73, 0xde, 0xed, 0x9f, 0x39, 0x15, 0x63, 0xf3, 0x8f, 0x69, 0xb0, + 0xac, 0x7b, 0xaa, 0x8d, 0x85, 0x4b, 0x22, 0x2e, 0xb3, 0x53, 0x73, 0x05, 0x25, 0x6e, 0x8c, 0x75, + 0x84, 0xa6, 0x42, 0x4e, 0xdd, 0x18, 0xc3, 0x16, 0x00, 0x3e, 0xc3, 0xae, 0xc0, 0x01, 0x72, 0x85, + 0x0a, 0x76, 0x61, 0x6f, 0xdd, 0xce, 0x67, 0xa8, 0x5d, 0xcc, 0x50, 0x7b, 0x50, 0xcc, 0xd0, 0x83, + 0xf2, 0xcd, 0x6d, 0xbd, 0xf4, 0xcb, 0x9f, 0x75, 0xc3, 0x31, 0xf5, 0xb9, 0x7d, 0x01, 0x3f, 0x04, + 0xf0, 0x12, 0xb3, 0x04, 0x47, 0xaa, 0xe2, 0x68, 0x77, 0x67, 0x07, 0x25, 0x5c, 0x4d, 0xbb, 0x19, + 0x67, 0x25, 0x67, 0xa4, 0xc3, 0xee, 0xce, 0xce, 0x29, 0x87, 0x36, 0x58, 0xd3, 0x1d, 0xee, 0xd3, + 0x38, 0x26, 0x02, 0x79, 0x63, 0x81, 0xb9, 0x1a, 0x7b, 0x33, 0xce, 0x6a, 0x4e, 0xb5, 0x14, 0x73, + 0x20, 0x09, 0x78, 0x08, 0x1a, 0x5a, 0xff, 0x23, 0x65, 0x97, 0x24, 0x09, 0x11, 0xc7, 0x02, 0xa5, + 0x8c, 0x8c, 0x5c, 0x81, 0xf5, 0xe1, 0x59, 0x75, 0xf8, 0xff, 0xb9, 0xee, 0x75, 0x2e, 0xeb, 0x63, + 0xd1, 0xcb, 0x45, 0xb9, 0x4f, 0x1b, 0xd4, 0x1f, 0xf1, 0x51, 0xcd, 0x13, 0x68, 0x9b, 0x39, 0x65, + 0xb3, 0xf1, 0xd0, 0xa6, 0xaf, 0x34, 0xb9, 0xcb, 0x4b, 0x00, 0xf4, 0x34, 0x43, 0x24, 0x50, 0x73, + 0x6f, 0xe9, 0x60, 0x69, 0x72, 0x5b, 0x37, 0x75, 0xd9, 0xbb, 0x6d, 0xc7, 0xd4, 0x82, 0x6e, 0x00, + 0x9f, 0x83, 0x4a, 0xc6, 0x31, 0x7b, 0xa7, 0x2c, 0x65, 0x75, 0xc9, 0x92, 0xc4, 0xef, 0x8b, 0xf2, + 0x0c, 0xcc, 0xe3, 0x2b, 0xec, 0x4b, 0x4f, 0x39, 0xec, 0xcc, 0x03, 0x30, 0xb9, 0xad, 0xcf, 0x75, + 0xae, 0xb0, 0xdf, 0x6d, 0x3b, 0x73, 0x92, 0xea, 0x06, 0x07, 0xc1, 0xcd, 0x9b, 0x5a, 0xe9, 0xb7, + 0x37, 0xb5, 0xd2, 0x4f, 0x93, 0x9a, 0x71, 0x33, 0xa9, 0x19, 0xbf, 0x4e, 0x6a, 0xc6, 0x5f, 0x93, + 0x9a, 0xf1, 0xfd, 0xd7, 0xff, 0xfd, 0x1f, 0xf7, 0x73, 0xfd, 0xfb, 0x6d, 0xc9, 0x9b, 0x53, 0xdf, + 0xfd, 0xa3, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xba, 0x6d, 0x7b, 0x04, 0xc8, 0x07, 0x00, 0x00, } func (m *Options) 
Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -316,131 +333,189 @@ func (m *Options) Marshal() (dAtA []byte, err error) { } func (m *Options) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Options) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if m.Debug { - dAtA[i] = 0x8 - i++ - if m.Debug { + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.ScrubLogs { + i-- + if m.ScrubLogs { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - } - if m.DebugType != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(m.DebugType)) - } - if len(m.RegistryRoot) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(len(m.RegistryRoot))) - i += copy(dAtA[i:], m.RegistryRoot) - } - if len(m.SandboxImage) > 0 { - dAtA[i] = 0x22 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(len(m.SandboxImage))) - i += copy(dAtA[i:], m.SandboxImage) - } - if len(m.SandboxPlatform) > 0 { - dAtA[i] = 0x2a - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(len(m.SandboxPlatform))) - i += copy(dAtA[i:], m.SandboxPlatform) - } - if m.SandboxIsolation != 0 { - dAtA[i] = 0x30 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(m.SandboxIsolation)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa0 } - if len(m.BootFilesRootPath) > 0 { - dAtA[i] = 0x3a - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(len(m.BootFilesRootPath))) - i += copy(dAtA[i:], m.BootFilesRootPath) + if m.NoInheritHostTimezone { + i-- + if m.NoInheritHostTimezone { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x98 + } + if len(m.DefaultContainerAnnotations) > 0 { + for k := range m.DefaultContainerAnnotations { + v := m.DefaultContainerAnnotations[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintRunhcs(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintRunhcs(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintRunhcs(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } } - if m.VmProcessorCount != 0 { - dAtA[i] = 0x40 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(m.VmProcessorCount)) + if m.IoRetryTimeoutInSec != 0 { + i = encodeVarintRunhcs(dAtA, i, uint64(m.IoRetryTimeoutInSec)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 } - if m.VmMemorySizeInMb != 0 { - dAtA[i] = 0x48 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(m.VmMemorySizeInMb)) + if len(m.LogLevel) > 0 { + i -= len(m.LogLevel) + copy(dAtA[i:], m.LogLevel) + i = encodeVarintRunhcs(dAtA, i, uint64(len(m.LogLevel))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 } - if len(m.GPUVHDPath) > 0 { - dAtA[i] = 0x52 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(len(m.GPUVHDPath))) - i += copy(dAtA[i:], m.GPUVHDPath) + if len(m.NCProxyAddr) > 0 { + i -= len(m.NCProxyAddr) + copy(dAtA[i:], m.NCProxyAddr) + i = encodeVarintRunhcs(dAtA, i, uint64(len(m.NCProxyAddr))) + i-- + dAtA[i] = 0x7a } - if m.ScaleCpuLimitsToSandbox { - dAtA[i] = 0x58 - i++ - if m.ScaleCpuLimitsToSandbox { + if m.ShareScratch { + i-- + if m.ShareScratch { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ - } - if m.DefaultContainerScratchSizeInGb != 0 { - dAtA[i] = 0x60 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(m.DefaultContainerScratchSizeInGb)) + i-- + dAtA[i] = 0x70 } 
if m.DefaultVmScratchSizeInGb != 0 { - dAtA[i] = 0x68 - i++ i = encodeVarintRunhcs(dAtA, i, uint64(m.DefaultVmScratchSizeInGb)) + i-- + dAtA[i] = 0x68 } - if m.ShareScratch { - dAtA[i] = 0x70 - i++ - if m.ShareScratch { + if m.DefaultContainerScratchSizeInGb != 0 { + i = encodeVarintRunhcs(dAtA, i, uint64(m.DefaultContainerScratchSizeInGb)) + i-- + dAtA[i] = 0x60 + } + if m.ScaleCpuLimitsToSandbox { + i-- + if m.ScaleCpuLimitsToSandbox { dAtA[i] = 1 } else { dAtA[i] = 0 } - i++ + i-- + dAtA[i] = 0x58 } - if len(m.NCProxyAddr) > 0 { - dAtA[i] = 0x7a - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(len(m.NCProxyAddr))) - i += copy(dAtA[i:], m.NCProxyAddr) + if len(m.GPUVHDPath) > 0 { + i -= len(m.GPUVHDPath) + copy(dAtA[i:], m.GPUVHDPath) + i = encodeVarintRunhcs(dAtA, i, uint64(len(m.GPUVHDPath))) + i-- + dAtA[i] = 0x52 } - if len(m.LogLevel) > 0 { - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(len(m.LogLevel))) - i += copy(dAtA[i:], m.LogLevel) + if m.VmMemorySizeInMb != 0 { + i = encodeVarintRunhcs(dAtA, i, uint64(m.VmMemorySizeInMb)) + i-- + dAtA[i] = 0x48 } - if m.IoRetryTimeoutInSec != 0 { - dAtA[i] = 0x88 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(m.IoRetryTimeoutInSec)) + if m.VmProcessorCount != 0 { + i = encodeVarintRunhcs(dAtA, i, uint64(m.VmProcessorCount)) + i-- + dAtA[i] = 0x40 } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + if len(m.BootFilesRootPath) > 0 { + i -= len(m.BootFilesRootPath) + copy(dAtA[i:], m.BootFilesRootPath) + i = encodeVarintRunhcs(dAtA, i, uint64(len(m.BootFilesRootPath))) + i-- + dAtA[i] = 0x3a + } + if m.SandboxIsolation != 0 { + i = encodeVarintRunhcs(dAtA, i, uint64(m.SandboxIsolation)) + i-- + dAtA[i] = 0x30 + } + if len(m.SandboxPlatform) > 0 { + i -= len(m.SandboxPlatform) + copy(dAtA[i:], m.SandboxPlatform) + i = encodeVarintRunhcs(dAtA, i, uint64(len(m.SandboxPlatform))) + i-- + dAtA[i] = 0x2a + } + if len(m.SandboxImage) > 0 { + i -= len(m.SandboxImage) + copy(dAtA[i:], m.SandboxImage) + i = encodeVarintRunhcs(dAtA, i, uint64(len(m.SandboxImage))) + i-- + dAtA[i] = 0x22 + } + if len(m.RegistryRoot) > 0 { + i -= len(m.RegistryRoot) + copy(dAtA[i:], m.RegistryRoot) + i = encodeVarintRunhcs(dAtA, i, uint64(len(m.RegistryRoot))) + i-- + dAtA[i] = 0x1a + } + if m.DebugType != 0 { + i = encodeVarintRunhcs(dAtA, i, uint64(m.DebugType)) + i-- + dAtA[i] = 0x10 + } + if m.Debug { + i-- + if m.Debug { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 } - return i, nil + return len(dAtA) - i, nil } func (m *ProcessDetails) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } @@ -448,74 +523,84 @@ func (m *ProcessDetails) Marshal() (dAtA []byte, err error) { } func (m *ProcessDetails) MarshalTo(dAtA []byte) (int, error) { - var i int + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProcessDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) _ = i var l int _ = l - if len(m.ImageName) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(len(m.ImageName))) - i += copy(dAtA[i:], m.ImageName) - } - dAtA[i] = 0x12 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt))) - n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:]) - if err != nil { - return 0, err + if 
m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) } - i += n1 - if m.KernelTime_100Ns != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(m.KernelTime_100Ns)) + if len(m.ExecID) > 0 { + i -= len(m.ExecID) + copy(dAtA[i:], m.ExecID) + i = encodeVarintRunhcs(dAtA, i, uint64(len(m.ExecID))) + i-- + dAtA[i] = 0x4a } - if m.MemoryCommitBytes != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(m.MemoryCommitBytes)) + if m.UserTime_100Ns != 0 { + i = encodeVarintRunhcs(dAtA, i, uint64(m.UserTime_100Ns)) + i-- + dAtA[i] = 0x40 } - if m.MemoryWorkingSetPrivateBytes != 0 { - dAtA[i] = 0x28 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(m.MemoryWorkingSetPrivateBytes)) + if m.ProcessID != 0 { + i = encodeVarintRunhcs(dAtA, i, uint64(m.ProcessID)) + i-- + dAtA[i] = 0x38 } if m.MemoryWorkingSetSharedBytes != 0 { - dAtA[i] = 0x30 - i++ i = encodeVarintRunhcs(dAtA, i, uint64(m.MemoryWorkingSetSharedBytes)) + i-- + dAtA[i] = 0x30 } - if m.ProcessID != 0 { - dAtA[i] = 0x38 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(m.ProcessID)) + if m.MemoryWorkingSetPrivateBytes != 0 { + i = encodeVarintRunhcs(dAtA, i, uint64(m.MemoryWorkingSetPrivateBytes)) + i-- + dAtA[i] = 0x28 } - if m.UserTime_100Ns != 0 { - dAtA[i] = 0x40 - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(m.UserTime_100Ns)) + if m.MemoryCommitBytes != 0 { + i = encodeVarintRunhcs(dAtA, i, uint64(m.MemoryCommitBytes)) + i-- + dAtA[i] = 0x20 } - if len(m.ExecID) > 0 { - dAtA[i] = 0x4a - i++ - i = encodeVarintRunhcs(dAtA, i, uint64(len(m.ExecID))) - i += copy(dAtA[i:], m.ExecID) + if m.KernelTime_100Ns != 0 { + i = encodeVarintRunhcs(dAtA, i, uint64(m.KernelTime_100Ns)) + i-- + dAtA[i] = 0x18 } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) + n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt):]) + if err1 != nil { + return 0, err1 + } + i -= n1 + i = encodeVarintRunhcs(dAtA, i, uint64(n1)) + i-- + dAtA[i] = 0x12 + if len(m.ImageName) > 0 { + i -= len(m.ImageName) + copy(dAtA[i:], m.ImageName) + i = encodeVarintRunhcs(dAtA, i, uint64(len(m.ImageName))) + i-- + dAtA[i] = 0xa } - return i, nil + return len(dAtA) - i, nil } func encodeVarintRunhcs(dAtA []byte, offset int, v uint64) int { + offset -= sovRunhcs(v) + base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return offset + 1 + return base } func (m *Options) Size() (n int) { if m == nil { @@ -581,6 +666,20 @@ func (m *Options) Size() (n int) { if m.IoRetryTimeoutInSec != 0 { n += 2 + sovRunhcs(uint64(m.IoRetryTimeoutInSec)) } + if len(m.DefaultContainerAnnotations) > 0 { + for k, v := range m.DefaultContainerAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovRunhcs(uint64(len(k))) + 1 + len(v) + sovRunhcs(uint64(len(v))) + n += mapEntrySize + 2 + sovRunhcs(uint64(mapEntrySize)) + } + } + if m.NoInheritHostTimezone { + n += 3 + } + if m.ScrubLogs { + n += 3 + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -628,14 +727,7 @@ func (m *ProcessDetails) Size() (n int) { } func sovRunhcs(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + return (math_bits.Len64(x|1) + 6) / 7 } func sozRunhcs(x uint64) (n int) { return sovRunhcs(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -644,6 +736,16 @@ func (this *Options) String() string { if this == nil { return "nil" } + 
keysForDefaultContainerAnnotations := make([]string, 0, len(this.DefaultContainerAnnotations)) + for k, _ := range this.DefaultContainerAnnotations { + keysForDefaultContainerAnnotations = append(keysForDefaultContainerAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultContainerAnnotations) + mapStringForDefaultContainerAnnotations := "map[string]string{" + for _, k := range keysForDefaultContainerAnnotations { + mapStringForDefaultContainerAnnotations += fmt.Sprintf("%v: %v,", k, this.DefaultContainerAnnotations[k]) + } + mapStringForDefaultContainerAnnotations += "}" s := strings.Join([]string{`&Options{`, `Debug:` + fmt.Sprintf("%v", this.Debug) + `,`, `DebugType:` + fmt.Sprintf("%v", this.DebugType) + `,`, @@ -662,6 +764,9 @@ func (this *Options) String() string { `NCProxyAddr:` + fmt.Sprintf("%v", this.NCProxyAddr) + `,`, `LogLevel:` + fmt.Sprintf("%v", this.LogLevel) + `,`, `IoRetryTimeoutInSec:` + fmt.Sprintf("%v", this.IoRetryTimeoutInSec) + `,`, + `DefaultContainerAnnotations:` + mapStringForDefaultContainerAnnotations + `,`, + `NoInheritHostTimezone:` + fmt.Sprintf("%v", this.NoInheritHostTimezone) + `,`, + `ScrubLogs:` + fmt.Sprintf("%v", this.ScrubLogs) + `,`, `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") @@ -673,7 +778,7 @@ func (this *ProcessDetails) String() string { } s := strings.Join([]string{`&ProcessDetails{`, `ImageName:` + fmt.Sprintf("%v", this.ImageName) + `,`, - `CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, + `CreatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CreatedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, `KernelTime_100Ns:` + fmt.Sprintf("%v", this.KernelTime_100Ns) + `,`, `MemoryCommitBytes:` + fmt.Sprintf("%v", this.MemoryCommitBytes) + `,`, `MemoryWorkingSetPrivateBytes:` + fmt.Sprintf("%v", this.MemoryWorkingSetPrivateBytes) + `,`, @@ -1140,16 +1245,180 @@ func (m *Options) Unmarshal(dAtA []byte) error { break } } + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultContainerAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRunhcs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRunhcs + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRunhcs + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DefaultContainerAnnotations == nil { + m.DefaultContainerAnnotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRunhcs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRunhcs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthRunhcs + } + 
postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthRunhcs + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRunhcs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthRunhcs + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthRunhcs + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipRunhcs(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRunhcs + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.DefaultContainerAnnotations[mapkey] = mapvalue + iNdEx = postIndex + case 19: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoInheritHostTimezone", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRunhcs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NoInheritHostTimezone = bool(v != 0) + case 20: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ScrubLogs", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRunhcs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ScrubLogs = bool(v != 0) default: iNdEx = preIndex skippy, err := skipRunhcs(dAtA[iNdEx:]) if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthRunhcs - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthRunhcs } if (iNdEx + skippy) > l { @@ -1411,10 +1680,7 @@ func (m *ProcessDetails) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthRunhcs - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthRunhcs } if (iNdEx + skippy) > l { @@ -1433,6 +1699,7 @@ func (m *ProcessDetails) Unmarshal(dAtA []byte) error { func skipRunhcs(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 + depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -1464,10 +1731,8 @@ func skipRunhcs(dAtA []byte) (n int, err error) { break } } - return iNdEx, nil case 1: iNdEx += 8 - return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -1488,55 +1753,30 @@ func skipRunhcs(dAtA []byte) (n int, err error) { return 0, ErrInvalidLengthRunhcs } iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthRunhcs - } - return iNdEx, nil case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRunhcs - } - if iNdEx >= l { - return 0, 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipRunhcs(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthRunhcs - } - } - return iNdEx, nil + depth++ case 4: - return iNdEx, nil + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupRunhcs + } + depth-- case 5: iNdEx += 4 - return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } + if iNdEx < 0 { + return 0, ErrInvalidLengthRunhcs + } + if depth == 0 { + return iNdEx, nil + } } - panic("unreachable") + return 0, io.ErrUnexpectedEOF } var ( - ErrInvalidLengthRunhcs = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowRunhcs = fmt.Errorf("proto: integer overflow") + ErrInvalidLengthRunhcs = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRunhcs = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupRunhcs = fmt.Errorf("proto: unexpected end of group") ) diff --git a/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.proto b/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.proto index 60c89adbde85f..1124dd201ff48 100644 --- a/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.proto +++ b/vendor/github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options/runhcs.proto @@ -98,6 +98,16 @@ message Options { // The typical example is if Containerd has restarted but is expected to come back online. A 0 for this field is interpreted as an infinite // timeout. int32 io_retry_timeout_in_sec = 17; + + // default_container_annotations specifies a set of annotations that should be set for every workload container + map default_container_annotations = 18; + + // no_inherit_host_timezone specifies to skip inheriting the hosts time zone for WCOW UVMs and instead default to + // UTC. + bool no_inherit_host_timezone = 19; + + // scrub_logs enables removing environment variables and other potentially sensitive information from logs + bool scrub_logs = 20; } // ProcessDetails contains additional information about a process. This is the additional diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go b/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go index 7f1f2823ddce3..54c4b3bc4abfc 100644 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/attach.go @@ -1,3 +1,5 @@ +//go:build windows + package computestorage import ( @@ -17,8 +19,8 @@ import ( // // `layerData` is the parent read-only layer data. 
func AttachLayerStorageFilter(ctx context.Context, layerPath string, layerData LayerData) (err error) { - title := "hcsshim.AttachLayerStorageFilter" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + title := "hcsshim::AttachLayerStorageFilter" + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes( diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go b/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go index 8e28e6c504693..5058d3b55eae5 100644 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/destroy.go @@ -1,3 +1,5 @@ +//go:build windows + package computestorage import ( @@ -12,8 +14,8 @@ import ( // // `layerPath` is a path to a directory containing the layer to export. func DestroyLayer(ctx context.Context, layerPath string) (err error) { - title := "hcsshim.DestroyLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + title := "hcsshim::DestroyLayer" + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes(trace.StringAttribute("layerPath", layerPath)) diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go b/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go index 435473257e30c..daf1bfff2051f 100644 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/detach.go @@ -1,3 +1,5 @@ +//go:build windows + package computestorage import ( @@ -12,8 +14,8 @@ import ( // // `layerPath` is a path to a directory containing the layer to export. func DetachLayerStorageFilter(ctx context.Context, layerPath string) (err error) { - title := "hcsshim.DetachLayerStorageFilter" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + title := "hcsshim::DetachLayerStorageFilter" + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes(trace.StringAttribute("layerPath", layerPath)) diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/export.go b/vendor/github.com/Microsoft/hcsshim/computestorage/export.go index a1b12dd129286..c6370a5c9adf0 100644 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/export.go +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/export.go @@ -1,3 +1,5 @@ +//go:build windows + package computestorage import ( @@ -19,8 +21,8 @@ import ( // // `options` are the export options applied to the exported layer. 
func ExportLayer(ctx context.Context, layerPath, exportFolderPath string, layerData LayerData, options ExportLayerOptions) (err error) { - title := "hcsshim.ExportLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + title := "hcsshim::ExportLayer" + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes( @@ -28,17 +30,17 @@ func ExportLayer(ctx context.Context, layerPath, exportFolderPath string, layerD trace.StringAttribute("exportFolderPath", exportFolderPath), ) - ldbytes, err := json.Marshal(layerData) + ldBytes, err := json.Marshal(layerData) if err != nil { return err } - obytes, err := json.Marshal(options) + oBytes, err := json.Marshal(options) if err != nil { return err } - err = hcsExportLayer(layerPath, exportFolderPath, string(ldbytes), string(obytes)) + err = hcsExportLayer(layerPath, exportFolderPath, string(ldBytes), string(oBytes)) if err != nil { return errors.Wrap(err, "failed to export layer") } diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/format.go b/vendor/github.com/Microsoft/hcsshim/computestorage/format.go index 83c0fa33f0fce..2140e5c9fc131 100644 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/format.go +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/format.go @@ -1,3 +1,5 @@ +//go:build windows + package computestorage import ( @@ -5,16 +7,20 @@ import ( "github.com/Microsoft/hcsshim/internal/oc" "github.com/pkg/errors" - "go.opencensus.io/trace" "golang.org/x/sys/windows" ) // FormatWritableLayerVhd formats a virtual disk for use as a writable container layer. // // If the VHD is not mounted it will be temporarily mounted. +// +// NOTE: This API had a breaking change in the operating system after Windows Server 2019. +// On ws2019 the API expects to get passed a file handle from CreateFile for the vhd that +// the caller wants to format. On > ws2019, its expected that the caller passes a vhd handle +// that can be obtained from the virtdisk APIs. 
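// Illustrative sketch (not part of this diff): per the NOTE above, on builds newer
// than Windows Server 2019 the handle passed to FormatWritableLayerVhd is expected
// to come from the virtdisk APIs rather than from CreateFile. Assuming the
// go-winio vhd package is available, a caller might obtain such a handle roughly
// as follows (the path, flags, and error handling here are placeholders):
//
//	handle, err := vhd.OpenVirtualDisk(vhdPath, vhd.VirtualDiskAccessNone, vhd.OpenVirtualDiskFlagNone)
//	if err != nil {
//		return err
//	}
//	defer windows.CloseHandle(windows.Handle(handle))
//	if err := computestorage.FormatWritableLayerVhd(ctx, windows.Handle(handle)); err != nil {
//		return err
//	}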
func FormatWritableLayerVhd(ctx context.Context, vhdHandle windows.Handle) (err error) { - title := "hcsshim.FormatWritableLayerVhd" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + title := "hcsshim::FormatWritableLayerVhd" + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck defer span.End() defer func() { oc.SetSpanStatus(span, err) }() diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go b/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go index 87fee452cd3f8..c3608dcec8f2c 100644 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go @@ -1,3 +1,5 @@ +//go:build windows + package computestorage import ( @@ -6,10 +8,12 @@ import ( "path/filepath" "syscall" - "github.com/Microsoft/go-winio/pkg/security" "github.com/Microsoft/go-winio/vhd" + "github.com/Microsoft/hcsshim/internal/memory" "github.com/pkg/errors" "golang.org/x/sys/windows" + + "github.com/Microsoft/hcsshim/internal/security" ) const defaultVHDXBlockSizeInMB = 1 @@ -59,8 +63,8 @@ func SetupContainerBaseLayer(ctx context.Context, layerPath, baseVhdPath, diffVh createParams := &vhd.CreateVirtualDiskParameters{ Version: 2, Version2: vhd.CreateVersion2{ - MaximumSize: sizeInGB * 1024 * 1024 * 1024, - BlockSizeInBytes: defaultVHDXBlockSizeInMB * 1024 * 1024, + MaximumSize: sizeInGB * memory.GiB, + BlockSizeInBytes: defaultVHDXBlockSizeInMB * memory.MiB, }, } handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams) @@ -135,8 +139,8 @@ func SetupUtilityVMBaseLayer(ctx context.Context, uvmPath, baseVhdPath, diffVhdP createParams := &vhd.CreateVirtualDiskParameters{ Version: 2, Version2: vhd.CreateVersion2{ - MaximumSize: sizeInGB * 1024 * 1024 * 1024, - BlockSizeInBytes: defaultVHDXBlockSizeInMB * 1024 * 1024, + MaximumSize: sizeInGB * memory.GiB, + BlockSizeInBytes: defaultVHDXBlockSizeInMB * memory.MiB, }, } handle, err := vhd.CreateVirtualDisk(baseVhdPath, vhd.VirtualDiskAccessNone, vhd.CreateVirtualDiskFlagNone, createParams) diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/import.go b/vendor/github.com/Microsoft/hcsshim/computestorage/import.go index 0c61dab329185..e1c87416a3462 100644 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/import.go +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/import.go @@ -1,3 +1,5 @@ +//go:build windows + package computestorage import ( @@ -19,8 +21,8 @@ import ( // // `layerData` is the parent layer data. func ImportLayer(ctx context.Context, layerPath, sourceFolderPath string, layerData LayerData) (err error) { - title := "hcsshim.ImportLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + title := "hcsshim::ImportLayer" + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes( diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go b/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go index 53ed8ea6edaaf..d0c6216056486 100644 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/initialize.go @@ -1,3 +1,5 @@ +//go:build windows + package computestorage import ( @@ -16,8 +18,8 @@ import ( // // `layerData` is the parent read-only layer data. 
func InitializeWritableLayer(ctx context.Context, layerPath string, layerData LayerData) (err error) { - title := "hcsshim.InitializeWritableLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + title := "hcsshim::InitializeWritableLayer" + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes( diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go b/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go index fcdbbef81437d..4f4d8ebf2f9fa 100644 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/mount.go @@ -1,3 +1,5 @@ +//go:build windows + package computestorage import ( @@ -6,14 +8,13 @@ import ( "github.com/Microsoft/hcsshim/internal/interop" "github.com/Microsoft/hcsshim/internal/oc" "github.com/pkg/errors" - "go.opencensus.io/trace" "golang.org/x/sys/windows" ) // GetLayerVhdMountPath returns the volume path for a virtual disk of a writable container layer. func GetLayerVhdMountPath(ctx context.Context, vhdHandle windows.Handle) (path string, err error) { - title := "hcsshim.GetLayerVhdMountPath" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + title := "hcsshim::GetLayerVhdMountPath" + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck defer span.End() defer func() { oc.SetSpanStatus(span, err) }() diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go b/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go index 06aaf841e8f28..1c685aed0ae7e 100644 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/setup.go @@ -1,3 +1,5 @@ +//go:build windows + package computestorage import ( @@ -21,8 +23,8 @@ import ( // // `options` are the options applied while processing the layer. func SetupBaseOSLayer(ctx context.Context, layerPath string, vhdHandle windows.Handle, options OsLayerOptions) (err error) { - title := "hcsshim.SetupBaseOSLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + title := "hcsshim::SetupBaseOSLayer" + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes( @@ -48,12 +50,16 @@ func SetupBaseOSLayer(ctx context.Context, layerPath string, vhdHandle windows.H // `volumePath` is the path to the volume to be used for setup. // // `options` are the options applied while processing the layer. +// +// NOTE: This API is only available on builds of Windows greater than 19645. Inside we +// check if the hosts build has the API available by using 'GetVersion' which requires +// the calling application to be manifested. 
https://docs.microsoft.com/en-us/windows/win32/sbscs/manifests func SetupBaseOSVolume(ctx context.Context, layerPath, volumePath string, options OsLayerOptions) (err error) { if osversion.Build() < 19645 { return errors.New("SetupBaseOSVolume is not present on builds older than 19645") } - title := "hcsshim.SetupBaseOSVolume" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + title := "hcsshim::SetupBaseOSVolume" + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes( diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go b/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go index 95aff9c184838..82d68cb8b1e73 100644 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go @@ -7,7 +7,7 @@ import ( hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" ) -//go:generate go run ../mksyscall_windows.go -output zsyscall_windows.go storage.go +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go storage.go //sys hcsImportLayer(layerPath string, sourceFolderPath string, layerData string) (hr error) = computestorage.HcsImportLayer? //sys hcsExportLayer(layerPath string, exportFolderPath string, layerData string, options string) (hr error) = computestorage.HcsExportLayer? @@ -20,10 +20,13 @@ import ( //sys hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) = computestorage.HcsGetLayerVhdMountPath? //sys hcsSetupBaseOSVolume(layerPath string, volumePath string, options string) (hr error) = computestorage.HcsSetupBaseOSVolume? +type Version = hcsschema.Version +type Layer = hcsschema.Layer + // LayerData is the data used to describe parent layer information. type LayerData struct { - SchemaVersion hcsschema.Version `json:"SchemaVersion,omitempty"` - Layers []hcsschema.Layer `json:"Layers,omitempty"` + SchemaVersion Version `json:"SchemaVersion,omitempty"` + Layers []Layer `json:"Layers,omitempty"` } // ExportLayerOptions are the set of options that are used with the `computestorage.HcsExportLayer` syscall. diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go index 4f95180674340..9cf479181a70d 100644 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go @@ -1,4 +1,6 @@ -// Code generated mksyscall_windows.exe DO NOT EDIT +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. 
package computestorage @@ -19,6 +21,7 @@ const ( var ( errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL ) // errnoErr returns common boxed Errno values, to prevent @@ -26,7 +29,7 @@ var ( func errnoErr(e syscall.Errno) error { switch e { case 0: - return nil + return errERROR_EINVAL case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } @@ -39,42 +42,86 @@ func errnoErr(e syscall.Errno) error { var ( modcomputestorage = windows.NewLazySystemDLL("computestorage.dll") - procHcsImportLayer = modcomputestorage.NewProc("HcsImportLayer") - procHcsExportLayer = modcomputestorage.NewProc("HcsExportLayer") - procHcsDestoryLayer = modcomputestorage.NewProc("HcsDestoryLayer") - procHcsSetupBaseOSLayer = modcomputestorage.NewProc("HcsSetupBaseOSLayer") - procHcsInitializeWritableLayer = modcomputestorage.NewProc("HcsInitializeWritableLayer") procHcsAttachLayerStorageFilter = modcomputestorage.NewProc("HcsAttachLayerStorageFilter") + procHcsDestoryLayer = modcomputestorage.NewProc("HcsDestoryLayer") procHcsDetachLayerStorageFilter = modcomputestorage.NewProc("HcsDetachLayerStorageFilter") + procHcsExportLayer = modcomputestorage.NewProc("HcsExportLayer") procHcsFormatWritableLayerVhd = modcomputestorage.NewProc("HcsFormatWritableLayerVhd") procHcsGetLayerVhdMountPath = modcomputestorage.NewProc("HcsGetLayerVhdMountPath") + procHcsImportLayer = modcomputestorage.NewProc("HcsImportLayer") + procHcsInitializeWritableLayer = modcomputestorage.NewProc("HcsInitializeWritableLayer") + procHcsSetupBaseOSLayer = modcomputestorage.NewProc("HcsSetupBaseOSLayer") procHcsSetupBaseOSVolume = modcomputestorage.NewProc("HcsSetupBaseOSVolume") ) -func hcsImportLayer(layerPath string, sourceFolderPath string, layerData string) (hr error) { +func hcsAttachLayerStorageFilter(layerPath string, layerData string) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(layerPath) if hr != nil { return } var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(sourceFolderPath) + _p1, hr = syscall.UTF16PtrFromString(layerData) if hr != nil { return } - var _p2 *uint16 - _p2, hr = syscall.UTF16PtrFromString(layerData) + return _hcsAttachLayerStorageFilter(_p0, _p1) +} + +func _hcsAttachLayerStorageFilter(layerPath *uint16, layerData *uint16) (hr error) { + hr = procHcsAttachLayerStorageFilter.Find() if hr != nil { return } - return _hcsImportLayer(_p0, _p1, _p2) + r0, _, _ := syscall.Syscall(procHcsAttachLayerStorageFilter.Addr(), 2, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(layerData)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return } -func _hcsImportLayer(layerPath *uint16, sourceFolderPath *uint16, layerData *uint16) (hr error) { - if hr = procHcsImportLayer.Find(); hr != nil { +func hcsDestroyLayer(layerPath string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsImportLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(sourceFolderPath)), uintptr(unsafe.Pointer(layerData))) + return _hcsDestroyLayer(_p0) +} + +func _hcsDestroyLayer(layerPath *uint16) (hr error) { + hr = procHcsDestoryLayer.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsDestoryLayer.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func 
hcsDetachLayerStorageFilter(layerPath string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(layerPath) + if hr != nil { + return + } + return _hcsDetachLayerStorageFilter(_p0) +} + +func _hcsDetachLayerStorageFilter(layerPath *uint16) (hr error) { + hr = procHcsDetachLayerStorageFilter.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsDetachLayerStorageFilter.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -109,7 +156,8 @@ func hcsExportLayer(layerPath string, exportFolderPath string, layerData string, } func _hcsExportLayer(layerPath *uint16, exportFolderPath *uint16, layerData *uint16, options *uint16) (hr error) { - if hr = procHcsExportLayer.Find(); hr != nil { + hr = procHcsExportLayer.Find() + if hr != nil { return } r0, _, _ := syscall.Syscall6(procHcsExportLayer.Addr(), 4, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(exportFolderPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options)), 0, 0) @@ -122,20 +170,27 @@ func _hcsExportLayer(layerPath *uint16, exportFolderPath *uint16, layerData *uin return } -func hcsDestroyLayer(layerPath string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(layerPath) +func hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) { + hr = procHcsFormatWritableLayerVhd.Find() if hr != nil { return } - return _hcsDestroyLayer(_p0) + r0, _, _ := syscall.Syscall(procHcsFormatWritableLayerVhd.Addr(), 1, uintptr(handle), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return } -func _hcsDestroyLayer(layerPath *uint16) (hr error) { - if hr = procHcsDestoryLayer.Find(); hr != nil { +func hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) { + hr = procHcsGetLayerVhdMountPath.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsDestoryLayer.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0) + r0, _, _ := syscall.Syscall(procHcsGetLayerVhdMountPath.Addr(), 2, uintptr(vhdHandle), uintptr(unsafe.Pointer(mountPath)), 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -145,25 +200,31 @@ func _hcsDestroyLayer(layerPath *uint16) (hr error) { return } -func hcsSetupBaseOSLayer(layerPath string, handle windows.Handle, options string) (hr error) { +func hcsImportLayer(layerPath string, sourceFolderPath string, layerData string) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(layerPath) if hr != nil { return } var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(options) + _p1, hr = syscall.UTF16PtrFromString(sourceFolderPath) if hr != nil { return } - return _hcsSetupBaseOSLayer(_p0, handle, _p1) + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(layerData) + if hr != nil { + return + } + return _hcsImportLayer(_p0, _p1, _p2) } -func _hcsSetupBaseOSLayer(layerPath *uint16, handle windows.Handle, options *uint16) (hr error) { - if hr = procHcsSetupBaseOSLayer.Find(); hr != nil { +func _hcsImportLayer(layerPath *uint16, sourceFolderPath *uint16, layerData *uint16) (hr error) { + hr = procHcsImportLayer.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsSetupBaseOSLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(handle), uintptr(unsafe.Pointer(options))) + r0, _, _ := syscall.Syscall(procHcsImportLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(sourceFolderPath)), 
uintptr(unsafe.Pointer(layerData))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -193,7 +254,8 @@ func hcsInitializeWritableLayer(writableLayerPath string, layerData string, opti } func _hcsInitializeWritableLayer(writableLayerPath *uint16, layerData *uint16, options *uint16) (hr error) { - if hr = procHcsInitializeWritableLayer.Find(); hr != nil { + hr = procHcsInitializeWritableLayer.Find() + if hr != nil { return } r0, _, _ := syscall.Syscall(procHcsInitializeWritableLayer.Addr(), 3, uintptr(unsafe.Pointer(writableLayerPath)), uintptr(unsafe.Pointer(layerData)), uintptr(unsafe.Pointer(options))) @@ -206,76 +268,26 @@ func _hcsInitializeWritableLayer(writableLayerPath *uint16, layerData *uint16, o return } -func hcsAttachLayerStorageFilter(layerPath string, layerData string) (hr error) { +func hcsSetupBaseOSLayer(layerPath string, handle windows.Handle, options string) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(layerPath) if hr != nil { return } var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(layerData) + _p1, hr = syscall.UTF16PtrFromString(options) if hr != nil { return } - return _hcsAttachLayerStorageFilter(_p0, _p1) -} - -func _hcsAttachLayerStorageFilter(layerPath *uint16, layerData *uint16) (hr error) { - if hr = procHcsAttachLayerStorageFilter.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsAttachLayerStorageFilter.Addr(), 2, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(layerData)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return + return _hcsSetupBaseOSLayer(_p0, handle, _p1) } -func hcsDetachLayerStorageFilter(layerPath string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(layerPath) +func _hcsSetupBaseOSLayer(layerPath *uint16, handle windows.Handle, options *uint16) (hr error) { + hr = procHcsSetupBaseOSLayer.Find() if hr != nil { return } - return _hcsDetachLayerStorageFilter(_p0) -} - -func _hcsDetachLayerStorageFilter(layerPath *uint16) (hr error) { - if hr = procHcsDetachLayerStorageFilter.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsDetachLayerStorageFilter.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) { - if hr = procHcsFormatWritableLayerVhd.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsFormatWritableLayerVhd.Addr(), 1, uintptr(handle), 0, 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - -func hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) { - if hr = procHcsGetLayerVhdMountPath.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsGetLayerVhdMountPath.Addr(), 2, uintptr(vhdHandle), uintptr(unsafe.Pointer(mountPath)), 0) + r0, _, _ := syscall.Syscall(procHcsSetupBaseOSLayer.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(handle), uintptr(unsafe.Pointer(options))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -305,7 +317,8 @@ func hcsSetupBaseOSVolume(layerPath string, volumePath string, options string) ( } func _hcsSetupBaseOSVolume(layerPath *uint16, volumePath *uint16, options *uint16) (hr error) { - if hr = procHcsSetupBaseOSVolume.Find(); hr != nil { + hr = procHcsSetupBaseOSVolume.Find() + if hr != nil 
{ return } r0, _, _ := syscall.Syscall(procHcsSetupBaseOSVolume.Addr(), 3, uintptr(unsafe.Pointer(layerPath)), uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(options))) diff --git a/vendor/github.com/Microsoft/hcsshim/container.go b/vendor/github.com/Microsoft/hcsshim/container.go index bfd722898e948..c8f09f88b9fc3 100644 --- a/vendor/github.com/Microsoft/hcsshim/container.go +++ b/vendor/github.com/Microsoft/hcsshim/container.go @@ -1,3 +1,5 @@ +//go:build windows + package hcsshim import ( @@ -60,7 +62,7 @@ type container struct { waitCh chan struct{} } -// createComputeSystemAdditionalJSON is read from the environment at initialisation +// createContainerAdditionalJSON is read from the environment at initialization // time. It allows an environment variable to define additional JSON which // is merged in the CreateComputeSystem call to HCS. var createContainerAdditionalJSON []byte diff --git a/vendor/github.com/Microsoft/hcsshim/errors.go b/vendor/github.com/Microsoft/hcsshim/errors.go index f367022e712ec..594bbfb7a89bc 100644 --- a/vendor/github.com/Microsoft/hcsshim/errors.go +++ b/vendor/github.com/Microsoft/hcsshim/errors.go @@ -1,3 +1,5 @@ +//go:build windows + package hcsshim import ( @@ -50,6 +52,9 @@ var ( // ErrUnexpectedValue is an error encountered when hcs returns an invalid value ErrUnexpectedValue = hcs.ErrUnexpectedValue + // ErrOperationDenied is an error when hcs attempts an operation that is explicitly denied + ErrOperationDenied = hcs.ErrOperationDenied + // ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container ErrVmcomputeAlreadyStopped = hcs.ErrVmcomputeAlreadyStopped diff --git a/vendor/github.com/Microsoft/hcsshim/functional_tests.ps1 b/vendor/github.com/Microsoft/hcsshim/functional_tests.ps1 deleted file mode 100644 index ce6edbcf329c9..0000000000000 --- a/vendor/github.com/Microsoft/hcsshim/functional_tests.ps1 +++ /dev/null @@ -1,12 +0,0 @@ -# Requirements so far: -# dockerd running -# - image microsoft/nanoserver (matching host base image) docker load -i c:\baseimages\nanoserver.tar -# - image alpine (linux) docker pull --platform=linux alpine - - -# TODO: Add this a parameter for debugging. ie "functional-tests -debug=$true" -#$env:HCSSHIM_FUNCTIONAL_TESTS_DEBUG="yes please" - -#pushd uvm -go test -v -tags "functional uvmcreate uvmscratch uvmscsi uvmvpmem uvmvsmb uvmp9" ./... -#popd \ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/doc.go b/vendor/github.com/Microsoft/hcsshim/hcn/doc.go new file mode 100644 index 0000000000000..83b2fffb025d3 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hcn/doc.go @@ -0,0 +1,3 @@ +// Package hcn is a shim for the Host Compute Networking (HCN) service, which manages networking for Windows Server +// containers and Hyper-V containers. Previous to RS5, HCN was referred to as Host Networking Service (HNS). +package hcn diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcn.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcn.go index df3a59a78cb30..61bd5b5718bd9 100644 --- a/vendor/github.com/Microsoft/hcsshim/hcn/hcn.go +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcn.go @@ -1,5 +1,5 @@ -// Package hcn is a shim for the Host Compute Networking (HCN) service, which manages networking for Windows Server -// containers and Hyper-V containers. Previous to RS5, HCN was referred to as Host Networking Service (HNS). 
+//go:build windows + package hcn import ( @@ -10,7 +10,7 @@ import ( "github.com/Microsoft/go-winio/pkg/guid" ) -//go:generate go run ../mksyscall_windows.go -output zsyscall_windows.go hcn.go +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go hcn.go /// HNS V1 API @@ -228,7 +228,7 @@ func IPv6DualStackSupported() error { return platformDoesNotSupportError("IPv6 DualStack") } -//L4proxySupported returns an error if the HCN verison does not support L4Proxy +// L4proxySupported returns an error if the HCN version does not support L4Proxy func L4proxyPolicySupported() error { supported, err := GetCachedSupportedFeatures() if err != nil { @@ -240,7 +240,7 @@ func L4proxyPolicySupported() error { return platformDoesNotSupportError("L4ProxyPolicy") } -// L4WfpProxySupported returns an error if the HCN verison does not support L4WfpProxy +// L4WfpProxySupported returns an error if the HCN version does not support L4WfpProxy func L4WfpProxyPolicySupported() error { supported, err := GetCachedSupportedFeatures() if err != nil { diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go index 545e8639d6cfb..76f7c6f1f1226 100644 --- a/vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go @@ -1,3 +1,5 @@ +//go:build windows + package hcn import ( @@ -9,7 +11,7 @@ import ( "github.com/sirupsen/logrus" ) -// IpConfig is assoicated with an endpoint +// IpConfig is associated with an endpoint type IpConfig struct { IpAddress string `json:",omitempty"` PrefixLength uint8 `json:",omitempty"` @@ -70,14 +72,14 @@ type PolicyEndpointRequest struct { Policies []EndpointPolicy `json:",omitempty"` } -func getEndpoint(endpointGuid guid.GUID, query string) (*HostComputeEndpoint, error) { +func getEndpoint(endpointGUID guid.GUID, query string) (*HostComputeEndpoint, error) { // Open endpoint. var ( endpointHandle hcnEndpoint resultBuffer *uint16 propertiesBuffer *uint16 ) - hr := hcnOpenEndpoint(&endpointGuid, &endpointHandle, &resultBuffer) + hr := hcnOpenEndpoint(&endpointGUID, &endpointHandle, &resultBuffer) if err := checkForErrors("hcnOpenEndpoint", hr, resultBuffer); err != nil { return nil, err } @@ -119,8 +121,8 @@ func enumerateEndpoints(query string) ([]HostComputeEndpoint, error) { } var outputEndpoints []HostComputeEndpoint - for _, endpointGuid := range endpointIds { - endpoint, err := getEndpoint(endpointGuid, query) + for _, endpointGUID := range endpointIds { + endpoint, err := getEndpoint(endpointGUID, query) if err != nil { return nil, err } @@ -129,22 +131,22 @@ func enumerateEndpoints(query string) ([]HostComputeEndpoint, error) { return outputEndpoints, nil } -func createEndpoint(networkId string, endpointSettings string) (*HostComputeEndpoint, error) { - networkGuid, err := guid.FromString(networkId) +func createEndpoint(networkID string, endpointSettings string) (*HostComputeEndpoint, error) { + networkGUID, err := guid.FromString(networkID) if err != nil { return nil, errInvalidNetworkID } // Open network. var networkHandle hcnNetwork var resultBuffer *uint16 - hr := hcnOpenNetwork(&networkGuid, &networkHandle, &resultBuffer) + hr := hcnOpenNetwork(&networkGUID, &networkHandle, &resultBuffer) if err := checkForErrors("hcnOpenNetwork", hr, resultBuffer); err != nil { return nil, err } // Create endpoint. 
- endpointId := guid.GUID{} + endpointID := guid.GUID{} var endpointHandle hcnEndpoint - hr = hcnCreateEndpoint(networkHandle, &endpointId, endpointSettings, &endpointHandle, &resultBuffer) + hr = hcnCreateEndpoint(networkHandle, &endpointID, endpointSettings, &endpointHandle, &resultBuffer) if err := checkForErrors("hcnCreateEndpoint", hr, resultBuffer); err != nil { return nil, err } @@ -178,8 +180,8 @@ func createEndpoint(networkId string, endpointSettings string) (*HostComputeEndp return &outputEndpoint, nil } -func modifyEndpoint(endpointId string, settings string) (*HostComputeEndpoint, error) { - endpointGuid, err := guid.FromString(endpointId) +func modifyEndpoint(endpointID string, settings string) (*HostComputeEndpoint, error) { + endpointGUID, err := guid.FromString(endpointID) if err != nil { return nil, errInvalidEndpointID } @@ -189,7 +191,7 @@ func modifyEndpoint(endpointId string, settings string) (*HostComputeEndpoint, e resultBuffer *uint16 propertiesBuffer *uint16 ) - hr := hcnOpenEndpoint(&endpointGuid, &endpointHandle, &resultBuffer) + hr := hcnOpenEndpoint(&endpointGUID, &endpointHandle, &resultBuffer) if err := checkForErrors("hcnOpenEndpoint", hr, resultBuffer); err != nil { return nil, err } @@ -222,13 +224,13 @@ func modifyEndpoint(endpointId string, settings string) (*HostComputeEndpoint, e return &outputEndpoint, nil } -func deleteEndpoint(endpointId string) error { - endpointGuid, err := guid.FromString(endpointId) +func deleteEndpoint(endpointID string) error { + endpointGUID, err := guid.FromString(endpointID) if err != nil { return errInvalidEndpointID } var resultBuffer *uint16 - hr := hcnDeleteEndpoint(&endpointGuid, &resultBuffer) + hr := hcnDeleteEndpoint(&endpointGUID, &resultBuffer) if err := checkForErrors("hcnDeleteEndpoint", hr, resultBuffer); err != nil { return err } @@ -247,12 +249,12 @@ func ListEndpoints() ([]HostComputeEndpoint, error) { // ListEndpointsQuery makes a call to query the list of available endpoints. func ListEndpointsQuery(query HostComputeQuery) ([]HostComputeEndpoint, error) { - queryJson, err := json.Marshal(query) + queryJSON, err := json.Marshal(query) if err != nil { return nil, err } - endpoints, err := enumerateEndpoints(string(queryJson)) + endpoints, err := enumerateEndpoints(string(queryJSON)) if err != nil { return nil, err } @@ -260,10 +262,10 @@ func ListEndpointsQuery(query HostComputeQuery) ([]HostComputeEndpoint, error) { } // ListEndpointsOfNetwork queries the list of endpoints on a network. 
-func ListEndpointsOfNetwork(networkId string) ([]HostComputeEndpoint, error) { +func ListEndpointsOfNetwork(networkID string) ([]HostComputeEndpoint, error) { hcnQuery := defaultQuery() // TODO: Once query can convert schema, change to {HostComputeNetwork:networkId} - mapA := map[string]string{"VirtualNetwork": networkId} + mapA := map[string]string{"VirtualNetwork": networkID} filter, err := json.Marshal(mapA) if err != nil { return nil, err @@ -274,9 +276,9 @@ func ListEndpointsOfNetwork(networkId string) ([]HostComputeEndpoint, error) { } // GetEndpointByID returns an endpoint specified by Id -func GetEndpointByID(endpointId string) (*HostComputeEndpoint, error) { +func GetEndpointByID(endpointID string) (*HostComputeEndpoint, error) { hcnQuery := defaultQuery() - mapA := map[string]string{"ID": endpointId} + mapA := map[string]string{"ID": endpointID} filter, err := json.Marshal(mapA) if err != nil { return nil, err @@ -288,7 +290,7 @@ func GetEndpointByID(endpointId string) (*HostComputeEndpoint, error) { return nil, err } if len(endpoints) == 0 { - return nil, EndpointNotFoundError{EndpointID: endpointId} + return nil, EndpointNotFoundError{EndpointID: endpointID} } return &endpoints[0], err } @@ -345,15 +347,15 @@ func (endpoint *HostComputeEndpoint) Delete() error { } // ModifyEndpointSettings updates the Port/Policy of an Endpoint. -func ModifyEndpointSettings(endpointId string, request *ModifyEndpointSettingRequest) error { - logrus.Debugf("hcn::HostComputeEndpoint::ModifyEndpointSettings id=%s", endpointId) +func ModifyEndpointSettings(endpointID string, request *ModifyEndpointSettingRequest) error { + logrus.Debugf("hcn::HostComputeEndpoint::ModifyEndpointSettings id=%s", endpointID) endpointSettingsRequest, err := json.Marshal(request) if err != nil { return err } - _, err = modifyEndpoint(endpointId, string(endpointSettingsRequest)) + _, err = modifyEndpoint(endpointID, string(endpointSettingsRequest)) if err != nil { return err } @@ -364,25 +366,25 @@ func ModifyEndpointSettings(endpointId string, request *ModifyEndpointSettingReq func (endpoint *HostComputeEndpoint) ApplyPolicy(requestType RequestType, endpointPolicy PolicyEndpointRequest) error { logrus.Debugf("hcn::HostComputeEndpoint::ApplyPolicy id=%s", endpoint.Id) - settingsJson, err := json.Marshal(endpointPolicy) + settingsJSON, err := json.Marshal(endpointPolicy) if err != nil { return err } requestMessage := &ModifyEndpointSettingRequest{ ResourceType: EndpointResourceTypePolicy, RequestType: requestType, - Settings: settingsJson, + Settings: settingsJSON, } return ModifyEndpointSettings(endpoint.Id, requestMessage) } // NamespaceAttach modifies a Namespace to add an endpoint. -func (endpoint *HostComputeEndpoint) NamespaceAttach(namespaceId string) error { - return AddNamespaceEndpoint(namespaceId, endpoint.Id) +func (endpoint *HostComputeEndpoint) NamespaceAttach(namespaceID string) error { + return AddNamespaceEndpoint(namespaceID, endpoint.Id) } // NamespaceDetach modifies a Namespace to remove an endpoint. 
-func (endpoint *HostComputeEndpoint) NamespaceDetach(namespaceId string) error { - return RemoveNamespaceEndpoint(namespaceId, endpoint.Id) +func (endpoint *HostComputeEndpoint) NamespaceDetach(namespaceID string) error { + return RemoveNamespaceEndpoint(namespaceID, endpoint.Id) } diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go index ad30d320d97eb..81b56b9ffca3c 100644 --- a/vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go @@ -1,5 +1,5 @@ -// Package hcn is a shim for the Host Compute Networking (HCN) service, which manages networking for Windows Server -// containers and Hyper-V containers. Previous to RS5, HCN was referred to as Host Networking Service (HNS). +//go:build windows + package hcn import ( @@ -10,6 +10,7 @@ import ( "github.com/Microsoft/hcsshim/internal/hcserror" "github.com/Microsoft/hcsshim/internal/interop" "github.com/sirupsen/logrus" + "golang.org/x/sys/windows" ) var ( @@ -48,8 +49,8 @@ type ErrorCode uint32 // For common errors, define the error as it is in windows, so we can quickly determine it later const ( - ERROR_NOT_FOUND = 0x490 - HCN_E_PORT_ALREADY_EXISTS ErrorCode = 0x803b0013 + ERROR_NOT_FOUND = ErrorCode(windows.ERROR_NOT_FOUND) + HCN_E_PORT_ALREADY_EXISTS ErrorCode = ErrorCode(windows.HCN_E_PORT_ALREADY_EXISTS) ) type HcnError struct { @@ -87,10 +88,10 @@ func new(hr error, title string, rest string) error { // // Note that the below errors are not errors returned by hcn itself -// we wish to seperate them as they are shim usage error +// we wish to separate them as they are shim usage error // -// NetworkNotFoundError results from a failed seach for a network by Id or Name +// NetworkNotFoundError results from a failed search for a network by Id or Name type NetworkNotFoundError struct { NetworkName string NetworkID string @@ -103,7 +104,7 @@ func (e NetworkNotFoundError) Error() string { return fmt.Sprintf("Network ID %q not found", e.NetworkID) } -// EndpointNotFoundError results from a failed seach for an endpoint by Id or Name +// EndpointNotFoundError results from a failed search for an endpoint by Id or Name type EndpointNotFoundError struct { EndpointName string EndpointID string @@ -116,7 +117,7 @@ func (e EndpointNotFoundError) Error() string { return fmt.Sprintf("Endpoint ID %q not found", e.EndpointID) } -// NamespaceNotFoundError results from a failed seach for a namsepace by Id +// NamespaceNotFoundError results from a failed search for a namsepace by Id type NamespaceNotFoundError struct { NamespaceID string } @@ -125,7 +126,7 @@ func (e NamespaceNotFoundError) Error() string { return fmt.Sprintf("Namespace ID %q not found", e.NamespaceID) } -// LoadBalancerNotFoundError results from a failed seach for a loadbalancer by Id +// LoadBalancerNotFoundError results from a failed search for a loadbalancer by Id type LoadBalancerNotFoundError struct { LoadBalancerId string } @@ -134,7 +135,7 @@ func (e LoadBalancerNotFoundError) Error() string { return fmt.Sprintf("LoadBalancer %q not found", e.LoadBalancerId) } -// RouteNotFoundError results from a failed seach for a route by Id +// RouteNotFoundError results from a failed search for a route by Id type RouteNotFoundError struct { RouteId string } diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go index 14903bc5e9132..25e368fc23beb 100644 --- 
a/vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go @@ -1,3 +1,5 @@ +//go:build windows + package hcn import ( diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go index 1b434b07b3adf..4add34f374fb4 100644 --- a/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go @@ -1,3 +1,5 @@ +//go:build windows + package hcn import ( @@ -28,14 +30,14 @@ type HostComputeLoadBalancer struct { Flags LoadBalancerFlags `json:",omitempty"` // 0: None, 1: EnableDirectServerReturn } -//LoadBalancerFlags modify settings for a loadbalancer. +// LoadBalancerFlags modify settings for a loadbalancer. type LoadBalancerFlags uint32 var ( // LoadBalancerFlagsNone is the default. LoadBalancerFlagsNone LoadBalancerFlags = 0 // LoadBalancerFlagsDSR enables Direct Server Return (DSR) - LoadBalancerFlagsDSR LoadBalancerFlags = 1 + LoadBalancerFlagsDSR LoadBalancerFlags = 1 LoadBalancerFlagsIPv6 LoadBalancerFlags = 2 ) @@ -67,14 +69,14 @@ var ( LoadBalancerDistributionSourceIP LoadBalancerDistribution = 2 ) -func getLoadBalancer(loadBalancerGuid guid.GUID, query string) (*HostComputeLoadBalancer, error) { +func getLoadBalancer(loadBalancerGUID guid.GUID, query string) (*HostComputeLoadBalancer, error) { // Open loadBalancer. var ( loadBalancerHandle hcnLoadBalancer resultBuffer *uint16 propertiesBuffer *uint16 ) - hr := hcnOpenLoadBalancer(&loadBalancerGuid, &loadBalancerHandle, &resultBuffer) + hr := hcnOpenLoadBalancer(&loadBalancerGUID, &loadBalancerHandle, &resultBuffer) if err := checkForErrors("hcnOpenLoadBalancer", hr, resultBuffer); err != nil { return nil, err } @@ -115,8 +117,8 @@ func enumerateLoadBalancers(query string) ([]HostComputeLoadBalancer, error) { } var outputLoadBalancers []HostComputeLoadBalancer - for _, loadBalancerGuid := range loadBalancerIds { - loadBalancer, err := getLoadBalancer(loadBalancerGuid, query) + for _, loadBalancerGUID := range loadBalancerIds { + loadBalancer, err := getLoadBalancer(loadBalancerGUID, query) if err != nil { return nil, err } @@ -132,8 +134,8 @@ func createLoadBalancer(settings string) (*HostComputeLoadBalancer, error) { resultBuffer *uint16 propertiesBuffer *uint16 ) - loadBalancerGuid := guid.GUID{} - hr := hcnCreateLoadBalancer(&loadBalancerGuid, settings, &loadBalancerHandle, &resultBuffer) + loadBalancerGUID := guid.GUID{} + hr := hcnCreateLoadBalancer(&loadBalancerGUID, settings, &loadBalancerHandle, &resultBuffer) if err := checkForErrors("hcnCreateLoadBalancer", hr, resultBuffer); err != nil { return nil, err } @@ -161,13 +163,13 @@ func createLoadBalancer(settings string) (*HostComputeLoadBalancer, error) { return &outputLoadBalancer, nil } -func deleteLoadBalancer(loadBalancerId string) error { - loadBalancerGuid, err := guid.FromString(loadBalancerId) +func deleteLoadBalancer(loadBalancerID string) error { + loadBalancerGUID, err := guid.FromString(loadBalancerID) if err != nil { return errInvalidLoadBalancerID } var resultBuffer *uint16 - hr := hcnDeleteLoadBalancer(&loadBalancerGuid, &resultBuffer) + hr := hcnDeleteLoadBalancer(&loadBalancerGUID, &resultBuffer) if err := checkForErrors("hcnDeleteLoadBalancer", hr, resultBuffer); err != nil { return err } @@ -186,12 +188,12 @@ func ListLoadBalancers() ([]HostComputeLoadBalancer, error) { // ListLoadBalancersQuery makes a call to query the list of available loadBalancers. 
func ListLoadBalancersQuery(query HostComputeQuery) ([]HostComputeLoadBalancer, error) { - queryJson, err := json.Marshal(query) + queryJSON, err := json.Marshal(query) if err != nil { return nil, err } - loadBalancers, err := enumerateLoadBalancers(string(queryJson)) + loadBalancers, err := enumerateLoadBalancers(string(queryJSON)) if err != nil { return nil, err } @@ -199,9 +201,9 @@ func ListLoadBalancersQuery(query HostComputeQuery) ([]HostComputeLoadBalancer, } // GetLoadBalancerByID returns the LoadBalancer specified by Id. -func GetLoadBalancerByID(loadBalancerId string) (*HostComputeLoadBalancer, error) { +func GetLoadBalancerByID(loadBalancerID string) (*HostComputeLoadBalancer, error) { hcnQuery := defaultQuery() - mapA := map[string]string{"ID": loadBalancerId} + mapA := map[string]string{"ID": loadBalancerID} filter, err := json.Marshal(mapA) if err != nil { return nil, err @@ -213,7 +215,7 @@ func GetLoadBalancerByID(loadBalancerId string) (*HostComputeLoadBalancer, error return nil, err } if len(loadBalancers) == 0 { - return nil, LoadBalancerNotFoundError{LoadBalancerId: loadBalancerId} + return nil, LoadBalancerNotFoundError{LoadBalancerId: loadBalancerID} } return &loadBalancers[0], err } diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go index d2ef2296099ab..5768eac158ffd 100644 --- a/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go @@ -1,3 +1,5 @@ +//go:build windows + package hcn import ( @@ -27,7 +29,7 @@ type NamespaceResourceContainer struct { type NamespaceResourceType string var ( - // NamespaceResourceTypeContainer are contianers associated with a Namespace. + // NamespaceResourceTypeContainer are containers associated with a Namespace. NamespaceResourceTypeContainer NamespaceResourceType = "Container" // NamespaceResourceTypeEndpoint are endpoints associated with a Namespace. NamespaceResourceTypeEndpoint NamespaceResourceType = "Endpoint" @@ -70,14 +72,14 @@ type ModifyNamespaceSettingRequest struct { Settings json.RawMessage `json:",omitempty"` } -func getNamespace(namespaceGuid guid.GUID, query string) (*HostComputeNamespace, error) { +func getNamespace(namespaceGUID guid.GUID, query string) (*HostComputeNamespace, error) { // Open namespace. 
var ( namespaceHandle hcnNamespace resultBuffer *uint16 propertiesBuffer *uint16 ) - hr := hcnOpenNamespace(&namespaceGuid, &namespaceHandle, &resultBuffer) + hr := hcnOpenNamespace(&namespaceGUID, &namespaceHandle, &resultBuffer) if err := checkForErrors("hcnOpenNamespace", hr, resultBuffer); err != nil { return nil, err } @@ -118,8 +120,8 @@ func enumerateNamespaces(query string) ([]HostComputeNamespace, error) { } var outputNamespaces []HostComputeNamespace - for _, namespaceGuid := range namespaceIds { - namespace, err := getNamespace(namespaceGuid, query) + for _, namespaceGUID := range namespaceIds { + namespace, err := getNamespace(namespaceGUID, query) if err != nil { return nil, err } @@ -135,8 +137,8 @@ func createNamespace(settings string) (*HostComputeNamespace, error) { resultBuffer *uint16 propertiesBuffer *uint16 ) - namespaceGuid := guid.GUID{} - hr := hcnCreateNamespace(&namespaceGuid, settings, &namespaceHandle, &resultBuffer) + namespaceGUID := guid.GUID{} + hr := hcnCreateNamespace(&namespaceGUID, settings, &namespaceHandle, &resultBuffer) if err := checkForErrors("hcnCreateNamespace", hr, resultBuffer); err != nil { return nil, err } @@ -164,8 +166,8 @@ func createNamespace(settings string) (*HostComputeNamespace, error) { return &outputNamespace, nil } -func modifyNamespace(namespaceId string, settings string) (*HostComputeNamespace, error) { - namespaceGuid, err := guid.FromString(namespaceId) +func modifyNamespace(namespaceID string, settings string) (*HostComputeNamespace, error) { + namespaceGUID, err := guid.FromString(namespaceID) if err != nil { return nil, errInvalidNamespaceID } @@ -175,7 +177,7 @@ func modifyNamespace(namespaceId string, settings string) (*HostComputeNamespace resultBuffer *uint16 propertiesBuffer *uint16 ) - hr := hcnOpenNamespace(&namespaceGuid, &namespaceHandle, &resultBuffer) + hr := hcnOpenNamespace(&namespaceGUID, &namespaceHandle, &resultBuffer) if err := checkForErrors("hcnOpenNamespace", hr, resultBuffer); err != nil { return nil, err } @@ -208,13 +210,13 @@ func modifyNamespace(namespaceId string, settings string) (*HostComputeNamespace return &outputNamespace, nil } -func deleteNamespace(namespaceId string) error { - namespaceGuid, err := guid.FromString(namespaceId) +func deleteNamespace(namespaceID string) error { + namespaceGUID, err := guid.FromString(namespaceID) if err != nil { return errInvalidNamespaceID } var resultBuffer *uint16 - hr := hcnDeleteNamespace(&namespaceGuid, &resultBuffer) + hr := hcnDeleteNamespace(&namespaceGUID, &resultBuffer) if err := checkForErrors("hcnDeleteNamespace", hr, resultBuffer); err != nil { return err } @@ -233,12 +235,12 @@ func ListNamespaces() ([]HostComputeNamespace, error) { // ListNamespacesQuery makes a call to query the list of available namespaces. func ListNamespacesQuery(query HostComputeQuery) ([]HostComputeNamespace, error) { - queryJson, err := json.Marshal(query) + queryJSON, err := json.Marshal(query) if err != nil { return nil, err } - namespaces, err := enumerateNamespaces(string(queryJson)) + namespaces, err := enumerateNamespaces(string(queryJSON)) if err != nil { return nil, err } @@ -246,9 +248,9 @@ func ListNamespacesQuery(query HostComputeQuery) ([]HostComputeNamespace, error) } // GetNamespaceByID returns the Namespace specified by Id. 
-func GetNamespaceByID(namespaceId string) (*HostComputeNamespace, error) { +func GetNamespaceByID(namespaceID string) (*HostComputeNamespace, error) { hcnQuery := defaultQuery() - mapA := map[string]string{"ID": namespaceId} + mapA := map[string]string{"ID": namespaceID} filter, err := json.Marshal(mapA) if err != nil { return nil, err @@ -260,15 +262,15 @@ func GetNamespaceByID(namespaceId string) (*HostComputeNamespace, error) { return nil, err } if len(namespaces) == 0 { - return nil, NamespaceNotFoundError{NamespaceID: namespaceId} + return nil, NamespaceNotFoundError{NamespaceID: namespaceID} } return &namespaces[0], err } // GetNamespaceEndpointIds returns the endpoints of the Namespace specified by Id. -func GetNamespaceEndpointIds(namespaceId string) ([]string, error) { - namespace, err := GetNamespaceByID(namespaceId) +func GetNamespaceEndpointIds(namespaceID string) ([]string, error) { + namespace, err := GetNamespaceByID(namespaceID) if err != nil { return nil, err } @@ -286,19 +288,19 @@ func GetNamespaceEndpointIds(namespaceId string) ([]string, error) { } // GetNamespaceContainerIds returns the containers of the Namespace specified by Id. -func GetNamespaceContainerIds(namespaceId string) ([]string, error) { - namespace, err := GetNamespaceByID(namespaceId) +func GetNamespaceContainerIds(namespaceID string) ([]string, error) { + namespace, err := GetNamespaceByID(namespaceID) if err != nil { return nil, err } var containerIds []string for _, resource := range namespace.Resources { if resource.Type == "Container" { - var contaienrResource NamespaceResourceContainer - if err := json.Unmarshal([]byte(resource.Data), &contaienrResource); err != nil { + var containerResource NamespaceResourceContainer + if err := json.Unmarshal([]byte(resource.Data), &containerResource); err != nil { return nil, err } - containerIds = append(containerIds, contaienrResource.Id) + containerIds = append(containerIds, containerResource.Id) } } return containerIds, nil @@ -375,7 +377,7 @@ func (namespace *HostComputeNamespace) Sync() error { } shimPath := runhcs.VMPipePath(cfg.HostUniqueID) if err := runhcs.IssueVMRequest(shimPath, &req); err != nil { - // The shim is likey gone. Simply ignore the sync as if it didn't exist. + // The shim is likely gone. Simply ignore the sync as if it didn't exist. if perr, ok := err.(*os.PathError); ok && perr.Err == syscall.ERROR_FILE_NOT_FOUND { // Remove the reg key there is no point to try again _ = cfg.Remove() @@ -394,15 +396,15 @@ func (namespace *HostComputeNamespace) Sync() error { } // ModifyNamespaceSettings updates the Endpoints/Containers of a Namespace. -func ModifyNamespaceSettings(namespaceId string, request *ModifyNamespaceSettingRequest) error { - logrus.Debugf("hcn::HostComputeNamespace::ModifyNamespaceSettings id=%s", namespaceId) +func ModifyNamespaceSettings(namespaceID string, request *ModifyNamespaceSettingRequest) error { + logrus.Debugf("hcn::HostComputeNamespace::ModifyNamespaceSettings id=%s", namespaceID) namespaceSettings, err := json.Marshal(request) if err != nil { return err } - _, err = modifyNamespace(namespaceId, string(namespaceSettings)) + _, err = modifyNamespace(namespaceID, string(namespaceSettings)) if err != nil { return err } @@ -410,37 +412,37 @@ func ModifyNamespaceSettings(namespaceId string, request *ModifyNamespaceSetting } // AddNamespaceEndpoint adds an endpoint to a Namespace. 
-func AddNamespaceEndpoint(namespaceId string, endpointId string) error { - logrus.Debugf("hcn::HostComputeEndpoint::AddNamespaceEndpoint id=%s", endpointId) +func AddNamespaceEndpoint(namespaceID string, endpointID string) error { + logrus.Debugf("hcn::HostComputeEndpoint::AddNamespaceEndpoint id=%s", endpointID) - mapA := map[string]string{"EndpointId": endpointId} - settingsJson, err := json.Marshal(mapA) + mapA := map[string]string{"EndpointId": endpointID} + settingsJSON, err := json.Marshal(mapA) if err != nil { return err } requestMessage := &ModifyNamespaceSettingRequest{ ResourceType: NamespaceResourceTypeEndpoint, RequestType: RequestTypeAdd, - Settings: settingsJson, + Settings: settingsJSON, } - return ModifyNamespaceSettings(namespaceId, requestMessage) + return ModifyNamespaceSettings(namespaceID, requestMessage) } // RemoveNamespaceEndpoint removes an endpoint from a Namespace. -func RemoveNamespaceEndpoint(namespaceId string, endpointId string) error { - logrus.Debugf("hcn::HostComputeNamespace::RemoveNamespaceEndpoint id=%s", endpointId) +func RemoveNamespaceEndpoint(namespaceID string, endpointID string) error { + logrus.Debugf("hcn::HostComputeNamespace::RemoveNamespaceEndpoint id=%s", endpointID) - mapA := map[string]string{"EndpointId": endpointId} - settingsJson, err := json.Marshal(mapA) + mapA := map[string]string{"EndpointId": endpointID} + settingsJSON, err := json.Marshal(mapA) if err != nil { return err } requestMessage := &ModifyNamespaceSettingRequest{ ResourceType: NamespaceResourceTypeEndpoint, RequestType: RequestTypeRemove, - Settings: settingsJson, + Settings: settingsJSON, } - return ModifyNamespaceSettings(namespaceId, requestMessage) + return ModifyNamespaceSettings(namespaceID, requestMessage) } diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go index c36b136387a99..1b8b0dd4de374 100644 --- a/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go @@ -1,3 +1,5 @@ +//go:build windows + package hcn import ( @@ -110,14 +112,14 @@ type PolicyNetworkRequest struct { Policies []NetworkPolicy `json:",omitempty"` } -func getNetwork(networkGuid guid.GUID, query string) (*HostComputeNetwork, error) { +func getNetwork(networkGUID guid.GUID, query string) (*HostComputeNetwork, error) { // Open network. 
var ( networkHandle hcnNetwork resultBuffer *uint16 propertiesBuffer *uint16 ) - hr := hcnOpenNetwork(&networkGuid, &networkHandle, &resultBuffer) + hr := hcnOpenNetwork(&networkGUID, &networkHandle, &resultBuffer) if err := checkForErrors("hcnOpenNetwork", hr, resultBuffer); err != nil { return nil, err } @@ -164,8 +166,8 @@ func enumerateNetworks(query string) ([]HostComputeNetwork, error) { } var outputNetworks []HostComputeNetwork - for _, networkGuid := range networkIds { - network, err := getNetwork(networkGuid, query) + for _, networkGUID := range networkIds { + network, err := getNetwork(networkGUID, query) if err != nil { return nil, err } @@ -181,8 +183,8 @@ func createNetwork(settings string) (*HostComputeNetwork, error) { resultBuffer *uint16 propertiesBuffer *uint16 ) - networkGuid := guid.GUID{} - hr := hcnCreateNetwork(&networkGuid, settings, &networkHandle, &resultBuffer) + networkGUID := guid.GUID{} + hr := hcnCreateNetwork(&networkGUID, settings, &networkHandle, &resultBuffer) if err := checkForErrors("hcnCreateNetwork", hr, resultBuffer); err != nil { return nil, err } @@ -216,8 +218,8 @@ func createNetwork(settings string) (*HostComputeNetwork, error) { return &outputNetwork, nil } -func modifyNetwork(networkId string, settings string) (*HostComputeNetwork, error) { - networkGuid, err := guid.FromString(networkId) +func modifyNetwork(networkID string, settings string) (*HostComputeNetwork, error) { + networkGUID, err := guid.FromString(networkID) if err != nil { return nil, errInvalidNetworkID } @@ -227,7 +229,7 @@ func modifyNetwork(networkId string, settings string) (*HostComputeNetwork, erro resultBuffer *uint16 propertiesBuffer *uint16 ) - hr := hcnOpenNetwork(&networkGuid, &networkHandle, &resultBuffer) + hr := hcnOpenNetwork(&networkGUID, &networkHandle, &resultBuffer) if err := checkForErrors("hcnOpenNetwork", hr, resultBuffer); err != nil { return nil, err } @@ -266,13 +268,13 @@ func modifyNetwork(networkId string, settings string) (*HostComputeNetwork, erro return &outputNetwork, nil } -func deleteNetwork(networkId string) error { - networkGuid, err := guid.FromString(networkId) +func deleteNetwork(networkID string) error { + networkGUID, err := guid.FromString(networkID) if err != nil { return errInvalidNetworkID } var resultBuffer *uint16 - hr := hcnDeleteNetwork(&networkGuid, &resultBuffer) + hr := hcnDeleteNetwork(&networkGUID, &resultBuffer) if err := checkForErrors("hcnDeleteNetwork", hr, resultBuffer); err != nil { return err } @@ -291,12 +293,12 @@ func ListNetworks() ([]HostComputeNetwork, error) { // ListNetworksQuery makes a call to query the list of available networks. 
func ListNetworksQuery(query HostComputeQuery) ([]HostComputeNetwork, error) { - queryJson, err := json.Marshal(query) + queryJSON, err := json.Marshal(query) if err != nil { return nil, err } - networks, err := enumerateNetworks(string(queryJson)) + networks, err := enumerateNetworks(string(queryJSON)) if err != nil { return nil, err } @@ -408,14 +410,14 @@ func (network *HostComputeNetwork) ModifyNetworkSettings(request *ModifyNetworkS func (network *HostComputeNetwork) AddPolicy(networkPolicy PolicyNetworkRequest) error { logrus.Debugf("hcn::HostComputeNetwork::AddPolicy id=%s", network.Id) - settingsJson, err := json.Marshal(networkPolicy) + settingsJSON, err := json.Marshal(networkPolicy) if err != nil { return err } requestMessage := &ModifyNetworkSettingRequest{ ResourceType: NetworkResourceTypePolicy, RequestType: RequestTypeAdd, - Settings: settingsJson, + Settings: settingsJSON, } return network.ModifyNetworkSettings(requestMessage) @@ -425,14 +427,14 @@ func (network *HostComputeNetwork) AddPolicy(networkPolicy PolicyNetworkRequest) func (network *HostComputeNetwork) RemovePolicy(networkPolicy PolicyNetworkRequest) error { logrus.Debugf("hcn::HostComputeNetwork::RemovePolicy id=%s", network.Id) - settingsJson, err := json.Marshal(networkPolicy) + settingsJSON, err := json.Marshal(networkPolicy) if err != nil { return err } requestMessage := &ModifyNetworkSettingRequest{ ResourceType: NetworkResourceTypePolicy, RequestType: RequestTypeRemove, - Settings: settingsJson, + Settings: settingsJSON, } return network.ModifyNetworkSettings(requestMessage) diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go index c2aa599f3394b..dd381aec04abe 100644 --- a/vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go @@ -1,3 +1,5 @@ +//go:build windows + package hcn import ( @@ -23,7 +25,7 @@ const ( // Endpoint and Network have InterfaceConstraint and ProviderAddress NetworkProviderAddress EndpointPolicyType = "ProviderAddress" NetworkInterfaceConstraint EndpointPolicyType = "InterfaceConstraint" - TierAcl EndpointPolicyType = "TierAcl" + TierAcl EndpointPolicyType = "TierAcl" ) // EndpointPolicy is a collection of Policy settings for an Endpoint. @@ -133,7 +135,7 @@ type AclPolicySetting struct { RemotePorts string `json:",omitempty"` RuleType RuleType `json:",omitempty"` Priority uint16 `json:",omitempty"` -} +} // QosPolicySetting sets Quality of Service bandwidth caps on an Endpoint. type QosPolicySetting struct { @@ -166,7 +168,7 @@ type NetworkACLPolicySetting struct { RemotePorts string `json:",omitempty"` RuleType RuleType `json:",omitempty"` Priority uint16 `json:",omitempty"` -} +} // FiveTuple is nested in L4ProxyPolicySetting for WFP support. 
type FiveTuple struct { @@ -284,7 +286,7 @@ type RemoteSubnetRoutePolicySetting struct { type SetPolicyType string const ( - SetPolicyTypeIpSet SetPolicyType = "IPSET" + SetPolicyTypeIpSet SetPolicyType = "IPSET" SetPolicyTypeNestedIpSet SetPolicyType = "NESTEDIPSET" ) @@ -292,7 +294,7 @@ const ( type SetPolicySetting struct { Id string Name string - Type SetPolicyType + Type SetPolicyType `json:"PolicyType"` Values string } @@ -313,32 +315,32 @@ const ( ProtocolTypeICMPv6 ProtocolType = 58 ) -//L4ProxyPolicySetting applies proxy policy on network/endpoint +// L4ProxyPolicySetting applies proxy policy on network/endpoint type L4ProxyPolicySetting struct { IP string `json:",omitempty"` Port string `json:",omitempty"` Protocol ProtocolType `json:",omitempty"` Exceptions []string `json:",omitempty"` Destination string - OutboundNAT bool `json:",omitempty"` + OutboundNAT bool `json:",omitempty"` } // TierAclRule represents an ACL within TierAclPolicySetting type TierAclRule struct { - Id string `json:",omitempty"` - Protocols string `json:",omitempty"` - TierAclRuleAction ActionType `json:","` - LocalAddresses string `json:",omitempty"` - RemoteAddresses string `json:",omitempty"` - LocalPorts string `json:",omitempty"` - RemotePorts string `json:",omitempty"` - Priority uint16 `json:",omitempty"` + Id string `json:",omitempty"` + Protocols string `json:",omitempty"` + TierAclRuleAction ActionType `json:","` + LocalAddresses string `json:",omitempty"` + RemoteAddresses string `json:",omitempty"` + LocalPorts string `json:",omitempty"` + RemotePorts string `json:",omitempty"` + Priority uint16 `json:",omitempty"` } // TierAclPolicySetting represents a Tier containing ACLs type TierAclPolicySetting struct { - Name string `json:","` - Direction DirectionType `json:","` - Order uint16 `json:""` - TierAclRules []TierAclRule `json:",omitempty"` + Name string `json:","` + Direction DirectionType `json:","` + Order uint16 `json:""` + TierAclRules []TierAclRule `json:",omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go index 52e2498462446..d0761d6bd0f93 100644 --- a/vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go @@ -1,3 +1,5 @@ +//go:build windows + package hcn import ( diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go index bacb91fedaee1..1b4c24020524d 100644 --- a/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go @@ -1,11 +1,14 @@ +//go:build windows + package hcn import ( - "fmt" "sync" "github.com/pkg/errors" "github.com/sirupsen/logrus" + + "github.com/Microsoft/hcsshim/internal/log" ) var ( @@ -112,9 +115,9 @@ func getSupportedFeatures() (SupportedFeatures, error) { features.NetworkACL = isFeatureSupported(globals.Version, NetworkACLPolicyVersion) features.NestedIpSet = isFeatureSupported(globals.Version, NestedIpSetVersion) - logrus.WithFields(logrus.Fields{ - "version": fmt.Sprintf("%+v", globals.Version), - "supportedFeatures": fmt.Sprintf("%+v", features), + log.L.WithFields(logrus.Fields{ + "version": globals.Version, + "supportedFeatures": features, }).Info("HCN feature check") return features, nil diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go index 7ec5b58b66a8f..379023831ed72 100644 --- 
a/vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go @@ -1,4 +1,6 @@ -// Code generated mksyscall_windows.exe DO NOT EDIT +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. package hcn @@ -19,6 +21,7 @@ const ( var ( errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL ) // errnoErr returns common boxed Errno values, to prevent @@ -26,7 +29,7 @@ var ( func errnoErr(e syscall.Errno) error { switch e { case 0: - return nil + return errERROR_EINVAL case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } @@ -37,51 +40,55 @@ func errnoErr(e syscall.Errno) error { } var ( + modcomputenetwork = windows.NewLazySystemDLL("computenetwork.dll") modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") - modcomputenetwork = windows.NewLazySystemDLL("computenetwork.dll") - procSetCurrentThreadCompartmentId = modiphlpapi.NewProc("SetCurrentThreadCompartmentId") - procHNSCall = modvmcompute.NewProc("HNSCall") - procHcnEnumerateNetworks = modcomputenetwork.NewProc("HcnEnumerateNetworks") - procHcnCreateNetwork = modcomputenetwork.NewProc("HcnCreateNetwork") - procHcnOpenNetwork = modcomputenetwork.NewProc("HcnOpenNetwork") - procHcnModifyNetwork = modcomputenetwork.NewProc("HcnModifyNetwork") - procHcnQueryNetworkProperties = modcomputenetwork.NewProc("HcnQueryNetworkProperties") - procHcnDeleteNetwork = modcomputenetwork.NewProc("HcnDeleteNetwork") + procHcnCloseEndpoint = modcomputenetwork.NewProc("HcnCloseEndpoint") + procHcnCloseLoadBalancer = modcomputenetwork.NewProc("HcnCloseLoadBalancer") + procHcnCloseNamespace = modcomputenetwork.NewProc("HcnCloseNamespace") procHcnCloseNetwork = modcomputenetwork.NewProc("HcnCloseNetwork") - procHcnEnumerateEndpoints = modcomputenetwork.NewProc("HcnEnumerateEndpoints") + procHcnCloseSdnRoute = modcomputenetwork.NewProc("HcnCloseSdnRoute") procHcnCreateEndpoint = modcomputenetwork.NewProc("HcnCreateEndpoint") - procHcnOpenEndpoint = modcomputenetwork.NewProc("HcnOpenEndpoint") - procHcnModifyEndpoint = modcomputenetwork.NewProc("HcnModifyEndpoint") - procHcnQueryEndpointProperties = modcomputenetwork.NewProc("HcnQueryEndpointProperties") - procHcnDeleteEndpoint = modcomputenetwork.NewProc("HcnDeleteEndpoint") - procHcnCloseEndpoint = modcomputenetwork.NewProc("HcnCloseEndpoint") - procHcnEnumerateNamespaces = modcomputenetwork.NewProc("HcnEnumerateNamespaces") + procHcnCreateLoadBalancer = modcomputenetwork.NewProc("HcnCreateLoadBalancer") procHcnCreateNamespace = modcomputenetwork.NewProc("HcnCreateNamespace") - procHcnOpenNamespace = modcomputenetwork.NewProc("HcnOpenNamespace") - procHcnModifyNamespace = modcomputenetwork.NewProc("HcnModifyNamespace") - procHcnQueryNamespaceProperties = modcomputenetwork.NewProc("HcnQueryNamespaceProperties") + procHcnCreateNetwork = modcomputenetwork.NewProc("HcnCreateNetwork") + procHcnCreateSdnRoute = modcomputenetwork.NewProc("HcnCreateSdnRoute") + procHcnDeleteEndpoint = modcomputenetwork.NewProc("HcnDeleteEndpoint") + procHcnDeleteLoadBalancer = modcomputenetwork.NewProc("HcnDeleteLoadBalancer") procHcnDeleteNamespace = modcomputenetwork.NewProc("HcnDeleteNamespace") - procHcnCloseNamespace = modcomputenetwork.NewProc("HcnCloseNamespace") + procHcnDeleteNetwork = modcomputenetwork.NewProc("HcnDeleteNetwork") + procHcnDeleteSdnRoute = 
modcomputenetwork.NewProc("HcnDeleteSdnRoute") + procHcnEnumerateEndpoints = modcomputenetwork.NewProc("HcnEnumerateEndpoints") procHcnEnumerateLoadBalancers = modcomputenetwork.NewProc("HcnEnumerateLoadBalancers") - procHcnCreateLoadBalancer = modcomputenetwork.NewProc("HcnCreateLoadBalancer") - procHcnOpenLoadBalancer = modcomputenetwork.NewProc("HcnOpenLoadBalancer") - procHcnModifyLoadBalancer = modcomputenetwork.NewProc("HcnModifyLoadBalancer") - procHcnQueryLoadBalancerProperties = modcomputenetwork.NewProc("HcnQueryLoadBalancerProperties") - procHcnDeleteLoadBalancer = modcomputenetwork.NewProc("HcnDeleteLoadBalancer") - procHcnCloseLoadBalancer = modcomputenetwork.NewProc("HcnCloseLoadBalancer") + procHcnEnumerateNamespaces = modcomputenetwork.NewProc("HcnEnumerateNamespaces") + procHcnEnumerateNetworks = modcomputenetwork.NewProc("HcnEnumerateNetworks") procHcnEnumerateSdnRoutes = modcomputenetwork.NewProc("HcnEnumerateSdnRoutes") - procHcnCreateSdnRoute = modcomputenetwork.NewProc("HcnCreateSdnRoute") - procHcnOpenSdnRoute = modcomputenetwork.NewProc("HcnOpenSdnRoute") + procHcnModifyEndpoint = modcomputenetwork.NewProc("HcnModifyEndpoint") + procHcnModifyLoadBalancer = modcomputenetwork.NewProc("HcnModifyLoadBalancer") + procHcnModifyNamespace = modcomputenetwork.NewProc("HcnModifyNamespace") + procHcnModifyNetwork = modcomputenetwork.NewProc("HcnModifyNetwork") procHcnModifySdnRoute = modcomputenetwork.NewProc("HcnModifySdnRoute") + procHcnOpenEndpoint = modcomputenetwork.NewProc("HcnOpenEndpoint") + procHcnOpenLoadBalancer = modcomputenetwork.NewProc("HcnOpenLoadBalancer") + procHcnOpenNamespace = modcomputenetwork.NewProc("HcnOpenNamespace") + procHcnOpenNetwork = modcomputenetwork.NewProc("HcnOpenNetwork") + procHcnOpenSdnRoute = modcomputenetwork.NewProc("HcnOpenSdnRoute") + procHcnQueryEndpointProperties = modcomputenetwork.NewProc("HcnQueryEndpointProperties") + procHcnQueryLoadBalancerProperties = modcomputenetwork.NewProc("HcnQueryLoadBalancerProperties") + procHcnQueryNamespaceProperties = modcomputenetwork.NewProc("HcnQueryNamespaceProperties") + procHcnQueryNetworkProperties = modcomputenetwork.NewProc("HcnQueryNetworkProperties") procHcnQuerySdnRouteProperties = modcomputenetwork.NewProc("HcnQuerySdnRouteProperties") - procHcnDeleteSdnRoute = modcomputenetwork.NewProc("HcnDeleteSdnRoute") - procHcnCloseSdnRoute = modcomputenetwork.NewProc("HcnCloseSdnRoute") + procSetCurrentThreadCompartmentId = modiphlpapi.NewProc("SetCurrentThreadCompartmentId") + procHNSCall = modvmcompute.NewProc("HNSCall") ) -func SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) { - r0, _, _ := syscall.Syscall(procSetCurrentThreadCompartmentId.Addr(), 1, uintptr(compartmentId), 0, 0) +func hcnCloseEndpoint(endpoint hcnEndpoint) (hr error) { + hr = procHcnCloseEndpoint.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnCloseEndpoint.Addr(), 1, uintptr(endpoint), 0, 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -91,30 +98,27 @@ func SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) { return } -func _hnsCall(method string, path string, object string, response **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(method) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(path) +func hcnCloseLoadBalancer(loadBalancer hcnLoadBalancer) (hr error) { + hr = procHcnCloseLoadBalancer.Find() if hr != nil { return } - var _p2 *uint16 - _p2, hr = 
syscall.UTF16PtrFromString(object) - if hr != nil { - return + r0, _, _ := syscall.Syscall(procHcnCloseLoadBalancer.Addr(), 1, uintptr(loadBalancer), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } - return __hnsCall(_p0, _p1, _p2, response) + return } -func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16) (hr error) { - if hr = procHNSCall.Find(); hr != nil { +func hcnCloseNamespace(namespace hcnNamespace) (hr error) { + hr = procHcnCloseNamespace.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall6(procHNSCall.Addr(), 4, uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)), 0, 0) + r0, _, _ := syscall.Syscall(procHcnCloseNamespace.Addr(), 1, uintptr(namespace), 0, 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -124,20 +128,27 @@ func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16) return } -func hcnEnumerateNetworks(query string, networks **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) +func hcnCloseNetwork(network hcnNetwork) (hr error) { + hr = procHcnCloseNetwork.Find() if hr != nil { return } - return _hcnEnumerateNetworks(_p0, networks, result) + r0, _, _ := syscall.Syscall(procHcnCloseNetwork.Addr(), 1, uintptr(network), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return } -func _hcnEnumerateNetworks(query *uint16, networks **uint16, result **uint16) (hr error) { - if hr = procHcnEnumerateNetworks.Find(); hr != nil { +func hcnCloseRoute(route hcnRoute) (hr error) { + hr = procHcnCloseSdnRoute.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcnEnumerateNetworks.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(networks)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.Syscall(procHcnCloseSdnRoute.Addr(), 1, uintptr(route), 0, 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -147,20 +158,21 @@ func _hcnEnumerateNetworks(query *uint16, networks **uint16, result **uint16) (h return } -func hcnCreateNetwork(id *_guid, settings string, network *hcnNetwork, result **uint16) (hr error) { +func hcnCreateEndpoint(network hcnNetwork, id *_guid, settings string, endpoint *hcnEndpoint, result **uint16) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(settings) if hr != nil { return } - return _hcnCreateNetwork(id, _p0, network, result) + return _hcnCreateEndpoint(network, id, _p0, endpoint, result) } -func _hcnCreateNetwork(id *_guid, settings *uint16, network *hcnNetwork, result **uint16) (hr error) { - if hr = procHcnCreateNetwork.Find(); hr != nil { +func _hcnCreateEndpoint(network hcnNetwork, id *_guid, settings *uint16, endpoint *hcnEndpoint, result **uint16) (hr error) { + hr = procHcnCreateEndpoint.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall6(procHcnCreateNetwork.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(network)), uintptr(unsafe.Pointer(result)), 0, 0) + r0, _, _ := syscall.Syscall6(procHcnCreateEndpoint.Addr(), 5, uintptr(network), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(endpoint)), uintptr(unsafe.Pointer(result)), 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -170,11 +182,21 @@ func _hcnCreateNetwork(id *_guid, 
settings *uint16, network *hcnNetwork, result return } -func hcnOpenNetwork(id *_guid, network *hcnNetwork, result **uint16) (hr error) { - if hr = procHcnOpenNetwork.Find(); hr != nil { +func hcnCreateLoadBalancer(id *_guid, settings string, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcnOpenNetwork.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(network)), uintptr(unsafe.Pointer(result))) + return _hcnCreateLoadBalancer(id, _p0, loadBalancer, result) +} + +func _hcnCreateLoadBalancer(id *_guid, settings *uint16, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) { + hr = procHcnCreateLoadBalancer.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnCreateLoadBalancer.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(loadBalancer)), uintptr(unsafe.Pointer(result)), 0, 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -184,20 +206,21 @@ func hcnOpenNetwork(id *_guid, network *hcnNetwork, result **uint16) (hr error) return } -func hcnModifyNetwork(network hcnNetwork, settings string, result **uint16) (hr error) { +func hcnCreateNamespace(id *_guid, settings string, namespace *hcnNamespace, result **uint16) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(settings) if hr != nil { return } - return _hcnModifyNetwork(network, _p0, result) + return _hcnCreateNamespace(id, _p0, namespace, result) } -func _hcnModifyNetwork(network hcnNetwork, settings *uint16, result **uint16) (hr error) { - if hr = procHcnModifyNetwork.Find(); hr != nil { +func _hcnCreateNamespace(id *_guid, settings *uint16, namespace *hcnNamespace, result **uint16) (hr error) { + hr = procHcnCreateNamespace.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcnModifyNetwork.Addr(), 3, uintptr(network), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.Syscall6(procHcnCreateNamespace.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(namespace)), uintptr(unsafe.Pointer(result)), 0, 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -207,20 +230,21 @@ func _hcnModifyNetwork(network hcnNetwork, settings *uint16, result **uint16) (h return } -func hcnQueryNetworkProperties(network hcnNetwork, query string, properties **uint16, result **uint16) (hr error) { +func hcnCreateNetwork(id *_guid, settings string, network *hcnNetwork, result **uint16) (hr error) { var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) + _p0, hr = syscall.UTF16PtrFromString(settings) if hr != nil { return } - return _hcnQueryNetworkProperties(network, _p0, properties, result) + return _hcnCreateNetwork(id, _p0, network, result) } -func _hcnQueryNetworkProperties(network hcnNetwork, query *uint16, properties **uint16, result **uint16) (hr error) { - if hr = procHcnQueryNetworkProperties.Find(); hr != nil { +func _hcnCreateNetwork(id *_guid, settings *uint16, network *hcnNetwork, result **uint16) (hr error) { + hr = procHcnCreateNetwork.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall6(procHcnQueryNetworkProperties.Addr(), 4, uintptr(network), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) + r0, _, _ := syscall.Syscall6(procHcnCreateNetwork.Addr(), 4, uintptr(unsafe.Pointer(id)), 
uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(network)), uintptr(unsafe.Pointer(result)), 0, 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -230,11 +254,21 @@ func _hcnQueryNetworkProperties(network hcnNetwork, query *uint16, properties ** return } -func hcnDeleteNetwork(id *_guid, result **uint16) (hr error) { - if hr = procHcnDeleteNetwork.Find(); hr != nil { +func hcnCreateRoute(id *_guid, settings string, route *hcnRoute, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcnDeleteNetwork.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) + return _hcnCreateRoute(id, _p0, route, result) +} + +func _hcnCreateRoute(id *_guid, settings *uint16, route *hcnRoute, result **uint16) (hr error) { + hr = procHcnCreateSdnRoute.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnCreateSdnRoute.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(route)), uintptr(unsafe.Pointer(result)), 0, 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -244,11 +278,12 @@ func hcnDeleteNetwork(id *_guid, result **uint16) (hr error) { return } -func hcnCloseNetwork(network hcnNetwork) (hr error) { - if hr = procHcnCloseNetwork.Find(); hr != nil { +func hcnDeleteEndpoint(id *_guid, result **uint16) (hr error) { + hr = procHcnDeleteEndpoint.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcnCloseNetwork.Addr(), 1, uintptr(network), 0, 0) + r0, _, _ := syscall.Syscall(procHcnDeleteEndpoint.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -258,20 +293,12 @@ func hcnCloseNetwork(network hcnNetwork) (hr error) { return } -func hcnEnumerateEndpoints(query string, endpoints **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) +func hcnDeleteLoadBalancer(id *_guid, result **uint16) (hr error) { + hr = procHcnDeleteLoadBalancer.Find() if hr != nil { return } - return _hcnEnumerateEndpoints(_p0, endpoints, result) -} - -func _hcnEnumerateEndpoints(query *uint16, endpoints **uint16, result **uint16) (hr error) { - if hr = procHcnEnumerateEndpoints.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnEnumerateEndpoints.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(endpoints)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.Syscall(procHcnDeleteLoadBalancer.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -281,20 +308,12 @@ func _hcnEnumerateEndpoints(query *uint16, endpoints **uint16, result **uint16) return } -func hcnCreateEndpoint(network hcnNetwork, id *_guid, settings string, endpoint *hcnEndpoint, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) +func hcnDeleteNamespace(id *_guid, result **uint16) (hr error) { + hr = procHcnDeleteNamespace.Find() if hr != nil { return } - return _hcnCreateEndpoint(network, id, _p0, endpoint, result) -} - -func _hcnCreateEndpoint(network hcnNetwork, id *_guid, settings *uint16, endpoint *hcnEndpoint, result **uint16) (hr error) { - if hr = procHcnCreateEndpoint.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcnCreateEndpoint.Addr(), 5, uintptr(network), 
uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(endpoint)), uintptr(unsafe.Pointer(result)), 0) + r0, _, _ := syscall.Syscall(procHcnDeleteNamespace.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -304,11 +323,12 @@ func _hcnCreateEndpoint(network hcnNetwork, id *_guid, settings *uint16, endpoin return } -func hcnOpenEndpoint(id *_guid, endpoint *hcnEndpoint, result **uint16) (hr error) { - if hr = procHcnOpenEndpoint.Find(); hr != nil { +func hcnDeleteNetwork(id *_guid, result **uint16) (hr error) { + hr = procHcnDeleteNetwork.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcnOpenEndpoint.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(endpoint)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.Syscall(procHcnDeleteNetwork.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -318,20 +338,12 @@ func hcnOpenEndpoint(id *_guid, endpoint *hcnEndpoint, result **uint16) (hr erro return } -func hcnModifyEndpoint(endpoint hcnEndpoint, settings string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) +func hcnDeleteRoute(id *_guid, result **uint16) (hr error) { + hr = procHcnDeleteSdnRoute.Find() if hr != nil { return } - return _hcnModifyEndpoint(endpoint, _p0, result) -} - -func _hcnModifyEndpoint(endpoint hcnEndpoint, settings *uint16, result **uint16) (hr error) { - if hr = procHcnModifyEndpoint.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnModifyEndpoint.Addr(), 3, uintptr(endpoint), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.Syscall(procHcnDeleteSdnRoute.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -341,20 +353,21 @@ func _hcnModifyEndpoint(endpoint hcnEndpoint, settings *uint16, result **uint16) return } -func hcnQueryEndpointProperties(endpoint hcnEndpoint, query string, properties **uint16, result **uint16) (hr error) { +func hcnEnumerateEndpoints(query string, endpoints **uint16, result **uint16) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(query) if hr != nil { return } - return _hcnQueryEndpointProperties(endpoint, _p0, properties, result) + return _hcnEnumerateEndpoints(_p0, endpoints, result) } -func _hcnQueryEndpointProperties(endpoint hcnEndpoint, query *uint16, properties **uint16, result **uint16) (hr error) { - if hr = procHcnQueryEndpointProperties.Find(); hr != nil { +func _hcnEnumerateEndpoints(query *uint16, endpoints **uint16, result **uint16) (hr error) { + hr = procHcnEnumerateEndpoints.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall6(procHcnQueryEndpointProperties.Addr(), 4, uintptr(endpoint), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) + r0, _, _ := syscall.Syscall(procHcnEnumerateEndpoints.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(endpoints)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -364,25 +377,21 @@ func _hcnQueryEndpointProperties(endpoint hcnEndpoint, query *uint16, properties return } -func hcnDeleteEndpoint(id *_guid, result **uint16) (hr error) { - if hr = procHcnDeleteEndpoint.Find(); hr != nil { +func 
hcnEnumerateLoadBalancers(query string, loadBalancers **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcnDeleteEndpoint.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return + return _hcnEnumerateLoadBalancers(_p0, loadBalancers, result) } -func hcnCloseEndpoint(endpoint hcnEndpoint) (hr error) { - if hr = procHcnCloseEndpoint.Find(); hr != nil { +func _hcnEnumerateLoadBalancers(query *uint16, loadBalancers **uint16, result **uint16) (hr error) { + hr = procHcnEnumerateLoadBalancers.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcnCloseEndpoint.Addr(), 1, uintptr(endpoint), 0, 0) + r0, _, _ := syscall.Syscall(procHcnEnumerateLoadBalancers.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(loadBalancers)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -402,7 +411,8 @@ func hcnEnumerateNamespaces(query string, namespaces **uint16, result **uint16) } func _hcnEnumerateNamespaces(query *uint16, namespaces **uint16, result **uint16) (hr error) { - if hr = procHcnEnumerateNamespaces.Find(); hr != nil { + hr = procHcnEnumerateNamespaces.Find() + if hr != nil { return } r0, _, _ := syscall.Syscall(procHcnEnumerateNamespaces.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(namespaces)), uintptr(unsafe.Pointer(result))) @@ -415,20 +425,21 @@ func _hcnEnumerateNamespaces(query *uint16, namespaces **uint16, result **uint16 return } -func hcnCreateNamespace(id *_guid, settings string, namespace *hcnNamespace, result **uint16) (hr error) { +func hcnEnumerateNetworks(query string, networks **uint16, result **uint16) (hr error) { var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) + _p0, hr = syscall.UTF16PtrFromString(query) if hr != nil { return } - return _hcnCreateNamespace(id, _p0, namespace, result) + return _hcnEnumerateNetworks(_p0, networks, result) } -func _hcnCreateNamespace(id *_guid, settings *uint16, namespace *hcnNamespace, result **uint16) (hr error) { - if hr = procHcnCreateNamespace.Find(); hr != nil { +func _hcnEnumerateNetworks(query *uint16, networks **uint16, result **uint16) (hr error) { + hr = procHcnEnumerateNetworks.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall6(procHcnCreateNamespace.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(namespace)), uintptr(unsafe.Pointer(result)), 0, 0) + r0, _, _ := syscall.Syscall(procHcnEnumerateNetworks.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(networks)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -438,11 +449,21 @@ func _hcnCreateNamespace(id *_guid, settings *uint16, namespace *hcnNamespace, r return } -func hcnOpenNamespace(id *_guid, namespace *hcnNamespace, result **uint16) (hr error) { - if hr = procHcnOpenNamespace.Find(); hr != nil { +func hcnEnumerateRoutes(query string, routes **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcnOpenNamespace.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(namespace)), uintptr(unsafe.Pointer(result))) + return _hcnEnumerateRoutes(_p0, routes, result) +} + +func 
_hcnEnumerateRoutes(query *uint16, routes **uint16, result **uint16) (hr error) { + hr = procHcnEnumerateSdnRoutes.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnEnumerateSdnRoutes.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(routes)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -452,20 +473,21 @@ func hcnOpenNamespace(id *_guid, namespace *hcnNamespace, result **uint16) (hr e return } -func hcnModifyNamespace(namespace hcnNamespace, settings string, result **uint16) (hr error) { +func hcnModifyEndpoint(endpoint hcnEndpoint, settings string, result **uint16) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(settings) if hr != nil { return } - return _hcnModifyNamespace(namespace, _p0, result) + return _hcnModifyEndpoint(endpoint, _p0, result) } -func _hcnModifyNamespace(namespace hcnNamespace, settings *uint16, result **uint16) (hr error) { - if hr = procHcnModifyNamespace.Find(); hr != nil { +func _hcnModifyEndpoint(endpoint hcnEndpoint, settings *uint16, result **uint16) (hr error) { + hr = procHcnModifyEndpoint.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcnModifyNamespace.Addr(), 3, uintptr(namespace), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.Syscall(procHcnModifyEndpoint.Addr(), 3, uintptr(endpoint), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -475,20 +497,21 @@ func _hcnModifyNamespace(namespace hcnNamespace, settings *uint16, result **uint return } -func hcnQueryNamespaceProperties(namespace hcnNamespace, query string, properties **uint16, result **uint16) (hr error) { +func hcnModifyLoadBalancer(loadBalancer hcnLoadBalancer, settings string, result **uint16) (hr error) { var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) + _p0, hr = syscall.UTF16PtrFromString(settings) if hr != nil { return } - return _hcnQueryNamespaceProperties(namespace, _p0, properties, result) + return _hcnModifyLoadBalancer(loadBalancer, _p0, result) } -func _hcnQueryNamespaceProperties(namespace hcnNamespace, query *uint16, properties **uint16, result **uint16) (hr error) { - if hr = procHcnQueryNamespaceProperties.Find(); hr != nil { +func _hcnModifyLoadBalancer(loadBalancer hcnLoadBalancer, settings *uint16, result **uint16) (hr error) { + hr = procHcnModifyLoadBalancer.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall6(procHcnQueryNamespaceProperties.Addr(), 4, uintptr(namespace), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) + r0, _, _ := syscall.Syscall(procHcnModifyLoadBalancer.Addr(), 3, uintptr(loadBalancer), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -498,25 +521,21 @@ func _hcnQueryNamespaceProperties(namespace hcnNamespace, query *uint16, propert return } -func hcnDeleteNamespace(id *_guid, result **uint16) (hr error) { - if hr = procHcnDeleteNamespace.Find(); hr != nil { +func hcnModifyNamespace(namespace hcnNamespace, settings string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcnDeleteNamespace.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - 
r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return + return _hcnModifyNamespace(namespace, _p0, result) } -func hcnCloseNamespace(namespace hcnNamespace) (hr error) { - if hr = procHcnCloseNamespace.Find(); hr != nil { +func _hcnModifyNamespace(namespace hcnNamespace, settings *uint16, result **uint16) (hr error) { + hr = procHcnModifyNamespace.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcnCloseNamespace.Addr(), 1, uintptr(namespace), 0, 0) + r0, _, _ := syscall.Syscall(procHcnModifyNamespace.Addr(), 3, uintptr(namespace), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -526,20 +545,21 @@ func hcnCloseNamespace(namespace hcnNamespace) (hr error) { return } -func hcnEnumerateLoadBalancers(query string, loadBalancers **uint16, result **uint16) (hr error) { +func hcnModifyNetwork(network hcnNetwork, settings string, result **uint16) (hr error) { var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) + _p0, hr = syscall.UTF16PtrFromString(settings) if hr != nil { return } - return _hcnEnumerateLoadBalancers(_p0, loadBalancers, result) + return _hcnModifyNetwork(network, _p0, result) } -func _hcnEnumerateLoadBalancers(query *uint16, loadBalancers **uint16, result **uint16) (hr error) { - if hr = procHcnEnumerateLoadBalancers.Find(); hr != nil { +func _hcnModifyNetwork(network hcnNetwork, settings *uint16, result **uint16) (hr error) { + hr = procHcnModifyNetwork.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcnEnumerateLoadBalancers.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(loadBalancers)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.Syscall(procHcnModifyNetwork.Addr(), 3, uintptr(network), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -549,20 +569,21 @@ func _hcnEnumerateLoadBalancers(query *uint16, loadBalancers **uint16, result ** return } -func hcnCreateLoadBalancer(id *_guid, settings string, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) { +func hcnModifyRoute(route hcnRoute, settings string, result **uint16) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(settings) if hr != nil { return } - return _hcnCreateLoadBalancer(id, _p0, loadBalancer, result) + return _hcnModifyRoute(route, _p0, result) } -func _hcnCreateLoadBalancer(id *_guid, settings *uint16, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) { - if hr = procHcnCreateLoadBalancer.Find(); hr != nil { +func _hcnModifyRoute(route hcnRoute, settings *uint16, result **uint16) (hr error) { + hr = procHcnModifySdnRoute.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall6(procHcnCreateLoadBalancer.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(loadBalancer)), uintptr(unsafe.Pointer(result)), 0, 0) + r0, _, _ := syscall.Syscall(procHcnModifySdnRoute.Addr(), 3, uintptr(route), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -572,11 +593,12 @@ func _hcnCreateLoadBalancer(id *_guid, settings *uint16, loadBalancer *hcnLoadBa return } -func hcnOpenLoadBalancer(id *_guid, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) { - if hr = procHcnOpenLoadBalancer.Find(); hr != nil { +func hcnOpenEndpoint(id *_guid, endpoint *hcnEndpoint, result **uint16) (hr error) { + hr = 
procHcnOpenEndpoint.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcnOpenLoadBalancer.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(loadBalancer)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.Syscall(procHcnOpenEndpoint.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(endpoint)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -586,20 +608,12 @@ func hcnOpenLoadBalancer(id *_guid, loadBalancer *hcnLoadBalancer, result **uint return } -func hcnModifyLoadBalancer(loadBalancer hcnLoadBalancer, settings string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) +func hcnOpenLoadBalancer(id *_guid, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) { + hr = procHcnOpenLoadBalancer.Find() if hr != nil { return } - return _hcnModifyLoadBalancer(loadBalancer, _p0, result) -} - -func _hcnModifyLoadBalancer(loadBalancer hcnLoadBalancer, settings *uint16, result **uint16) (hr error) { - if hr = procHcnModifyLoadBalancer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnModifyLoadBalancer.Addr(), 3, uintptr(loadBalancer), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.Syscall(procHcnOpenLoadBalancer.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(loadBalancer)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -609,20 +623,12 @@ func _hcnModifyLoadBalancer(loadBalancer hcnLoadBalancer, settings *uint16, resu return } -func hcnQueryLoadBalancerProperties(loadBalancer hcnLoadBalancer, query string, properties **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) +func hcnOpenNamespace(id *_guid, namespace *hcnNamespace, result **uint16) (hr error) { + hr = procHcnOpenNamespace.Find() if hr != nil { return } - return _hcnQueryLoadBalancerProperties(loadBalancer, _p0, properties, result) -} - -func _hcnQueryLoadBalancerProperties(loadBalancer hcnLoadBalancer, query *uint16, properties **uint16, result **uint16) (hr error) { - if hr = procHcnQueryLoadBalancerProperties.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcnQueryLoadBalancerProperties.Addr(), 4, uintptr(loadBalancer), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) + r0, _, _ := syscall.Syscall(procHcnOpenNamespace.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(namespace)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -632,11 +638,12 @@ func _hcnQueryLoadBalancerProperties(loadBalancer hcnLoadBalancer, query *uint16 return } -func hcnDeleteLoadBalancer(id *_guid, result **uint16) (hr error) { - if hr = procHcnDeleteLoadBalancer.Find(); hr != nil { +func hcnOpenNetwork(id *_guid, network *hcnNetwork, result **uint16) (hr error) { + hr = procHcnOpenNetwork.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcnDeleteLoadBalancer.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) + r0, _, _ := syscall.Syscall(procHcnOpenNetwork.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(network)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -646,11 +653,12 @@ func hcnDeleteLoadBalancer(id *_guid, result **uint16) (hr error) { return } -func 
hcnCloseLoadBalancer(loadBalancer hcnLoadBalancer) (hr error) { - if hr = procHcnCloseLoadBalancer.Find(); hr != nil { +func hcnOpenRoute(id *_guid, route *hcnRoute, result **uint16) (hr error) { + hr = procHcnOpenSdnRoute.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcnCloseLoadBalancer.Addr(), 1, uintptr(loadBalancer), 0, 0) + r0, _, _ := syscall.Syscall(procHcnOpenSdnRoute.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(route)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -660,20 +668,21 @@ func hcnCloseLoadBalancer(loadBalancer hcnLoadBalancer) (hr error) { return } -func hcnEnumerateRoutes(query string, routes **uint16, result **uint16) (hr error) { +func hcnQueryEndpointProperties(endpoint hcnEndpoint, query string, properties **uint16, result **uint16) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(query) if hr != nil { return } - return _hcnEnumerateRoutes(_p0, routes, result) + return _hcnQueryEndpointProperties(endpoint, _p0, properties, result) } -func _hcnEnumerateRoutes(query *uint16, routes **uint16, result **uint16) (hr error) { - if hr = procHcnEnumerateSdnRoutes.Find(); hr != nil { +func _hcnQueryEndpointProperties(endpoint hcnEndpoint, query *uint16, properties **uint16, result **uint16) (hr error) { + hr = procHcnQueryEndpointProperties.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcnEnumerateSdnRoutes.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(routes)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.Syscall6(procHcnQueryEndpointProperties.Addr(), 4, uintptr(endpoint), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -683,20 +692,21 @@ func _hcnEnumerateRoutes(query *uint16, routes **uint16, result **uint16) (hr er return } -func hcnCreateRoute(id *_guid, settings string, route *hcnRoute, result **uint16) (hr error) { +func hcnQueryLoadBalancerProperties(loadBalancer hcnLoadBalancer, query string, properties **uint16, result **uint16) (hr error) { var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) + _p0, hr = syscall.UTF16PtrFromString(query) if hr != nil { return } - return _hcnCreateRoute(id, _p0, route, result) + return _hcnQueryLoadBalancerProperties(loadBalancer, _p0, properties, result) } -func _hcnCreateRoute(id *_guid, settings *uint16, route *hcnRoute, result **uint16) (hr error) { - if hr = procHcnCreateSdnRoute.Find(); hr != nil { +func _hcnQueryLoadBalancerProperties(loadBalancer hcnLoadBalancer, query *uint16, properties **uint16, result **uint16) (hr error) { + hr = procHcnQueryLoadBalancerProperties.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall6(procHcnCreateSdnRoute.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(route)), uintptr(unsafe.Pointer(result)), 0, 0) + r0, _, _ := syscall.Syscall6(procHcnQueryLoadBalancerProperties.Addr(), 4, uintptr(loadBalancer), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -706,11 +716,21 @@ func _hcnCreateRoute(id *_guid, settings *uint16, route *hcnRoute, result **uint return } -func hcnOpenRoute(id *_guid, route *hcnRoute, result **uint16) (hr error) { - if hr = procHcnOpenSdnRoute.Find(); hr != nil { +func 
hcnQueryNamespaceProperties(namespace hcnNamespace, query string, properties **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcnOpenSdnRoute.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(route)), uintptr(unsafe.Pointer(result))) + return _hcnQueryNamespaceProperties(namespace, _p0, properties, result) +} + +func _hcnQueryNamespaceProperties(namespace hcnNamespace, query *uint16, properties **uint16, result **uint16) (hr error) { + hr = procHcnQueryNamespaceProperties.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnQueryNamespaceProperties.Addr(), 4, uintptr(namespace), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -720,20 +740,21 @@ func hcnOpenRoute(id *_guid, route *hcnRoute, result **uint16) (hr error) { return } -func hcnModifyRoute(route hcnRoute, settings string, result **uint16) (hr error) { +func hcnQueryNetworkProperties(network hcnNetwork, query string, properties **uint16, result **uint16) (hr error) { var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) + _p0, hr = syscall.UTF16PtrFromString(query) if hr != nil { return } - return _hcnModifyRoute(route, _p0, result) + return _hcnQueryNetworkProperties(network, _p0, properties, result) } -func _hcnModifyRoute(route hcnRoute, settings *uint16, result **uint16) (hr error) { - if hr = procHcnModifySdnRoute.Find(); hr != nil { +func _hcnQueryNetworkProperties(network hcnNetwork, query *uint16, properties **uint16, result **uint16) (hr error) { + hr = procHcnQueryNetworkProperties.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcnModifySdnRoute.Addr(), 3, uintptr(route), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.Syscall6(procHcnQueryNetworkProperties.Addr(), 4, uintptr(network), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -753,7 +774,8 @@ func hcnQueryRouteProperties(route hcnRoute, query string, properties **uint16, } func _hcnQueryRouteProperties(route hcnRoute, query *uint16, properties **uint16, result **uint16) (hr error) { - if hr = procHcnQuerySdnRouteProperties.Find(); hr != nil { + hr = procHcnQuerySdnRouteProperties.Find() + if hr != nil { return } r0, _, _ := syscall.Syscall6(procHcnQuerySdnRouteProperties.Addr(), 4, uintptr(route), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) @@ -766,11 +788,8 @@ func _hcnQueryRouteProperties(route hcnRoute, query *uint16, properties **uint16 return } -func hcnDeleteRoute(id *_guid, result **uint16) (hr error) { - if hr = procHcnDeleteSdnRoute.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcnDeleteSdnRoute.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) +func SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) { + r0, _, _ := syscall.Syscall(procSetCurrentThreadCompartmentId.Addr(), 1, uintptr(compartmentId), 0, 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -780,11 +799,31 @@ func hcnDeleteRoute(id *_guid, result **uint16) (hr error) { return } -func hcnCloseRoute(route hcnRoute) (hr error) { - if hr = procHcnCloseSdnRoute.Find(); hr != nil { +func 
_hnsCall(method string, path string, object string, response **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(method) + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcnCloseSdnRoute.Addr(), 1, uintptr(route), 0, 0) + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(object) + if hr != nil { + return + } + return __hnsCall(_p0, _p1, _p2, response) +} + +func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16) (hr error) { + hr = procHNSCall.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHNSCall.Addr(), 4, uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)), 0, 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff diff --git a/vendor/github.com/Microsoft/hcsshim/hcsshim.go b/vendor/github.com/Microsoft/hcsshim/hcsshim.go index ceb3ac85ee467..13f80e4a81c95 100644 --- a/vendor/github.com/Microsoft/hcsshim/hcsshim.go +++ b/vendor/github.com/Microsoft/hcsshim/hcsshim.go @@ -1,15 +1,17 @@ +//go:build windows + // Shim for the Host Compute Service (HCS) to manage Windows Server // containers and Hyper-V containers. package hcsshim import ( - "syscall" + "golang.org/x/sys/windows" "github.com/Microsoft/hcsshim/internal/hcserror" ) -//go:generate go run mksyscall_windows.go -output zsyscall_windows.go hcsshim.go +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go hcsshim.go //sys SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) = iphlpapi.SetCurrentThreadCompartmentId @@ -17,9 +19,9 @@ const ( // Specific user-visible exit codes WaitErrExecFailed = 32767 - ERROR_GEN_FAILURE = hcserror.ERROR_GEN_FAILURE - ERROR_SHUTDOWN_IN_PROGRESS = syscall.Errno(1115) - WSAEINVAL = syscall.Errno(10022) + ERROR_GEN_FAILURE = windows.ERROR_GEN_FAILURE + ERROR_SHUTDOWN_IN_PROGRESS = windows.ERROR_SHUTDOWN_IN_PROGRESS + WSAEINVAL = windows.WSAEINVAL // Timeout on wait calls TimeoutInfinite = 0xFFFFFFFF diff --git a/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go index 9e0059447d9f4..d8a73de98d0ca 100644 --- a/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go +++ b/vendor/github.com/Microsoft/hcsshim/hnsendpoint.go @@ -1,3 +1,5 @@ +//go:build windows + package hcsshim import ( @@ -13,7 +15,7 @@ type HNSEndpointStats = hns.EndpointStats // Namespace represents a Compartment. 
type Namespace = hns.Namespace -//SystemType represents the type of the system on which actions are done +// SystemType represents the type of the system on which actions are done type SystemType string // SystemType const diff --git a/vendor/github.com/Microsoft/hcsshim/hnsglobals.go b/vendor/github.com/Microsoft/hcsshim/hnsglobals.go index 2b538190476ea..c564bf4a351f6 100644 --- a/vendor/github.com/Microsoft/hcsshim/hnsglobals.go +++ b/vendor/github.com/Microsoft/hcsshim/hnsglobals.go @@ -1,3 +1,5 @@ +//go:build windows + package hcsshim import ( diff --git a/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go b/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go index f775fa1d07c9a..925c2124959fa 100644 --- a/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go +++ b/vendor/github.com/Microsoft/hcsshim/hnsnetwork.go @@ -1,14 +1,16 @@ +//go:build windows + package hcsshim import ( "github.com/Microsoft/hcsshim/internal/hns" ) -// Subnet is assoicated with a network and represents a list +// Subnet is associated with a network and represents a list // of subnets available to the network type Subnet = hns.Subnet -// MacPool is assoicated with a network and represents a list +// MacPool is associated with a network and represents a list // of macaddresses available to the network type MacPool = hns.MacPool diff --git a/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go b/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go index 55aaa4a50efb3..9bfe61ee83b10 100644 --- a/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go +++ b/vendor/github.com/Microsoft/hcsshim/hnspolicylist.go @@ -1,3 +1,5 @@ +//go:build windows + package hcsshim import ( diff --git a/vendor/github.com/Microsoft/hcsshim/hnssupport.go b/vendor/github.com/Microsoft/hcsshim/hnssupport.go index 69405244b6734..d97681e0ca9e7 100644 --- a/vendor/github.com/Microsoft/hcsshim/hnssupport.go +++ b/vendor/github.com/Microsoft/hcsshim/hnssupport.go @@ -1,3 +1,5 @@ +//go:build windows + package hcsshim import ( diff --git a/vendor/github.com/Microsoft/hcsshim/interface.go b/vendor/github.com/Microsoft/hcsshim/interface.go index 300eb59966874..81a2819516a31 100644 --- a/vendor/github.com/Microsoft/hcsshim/interface.go +++ b/vendor/github.com/Microsoft/hcsshim/interface.go @@ -1,3 +1,5 @@ +//go:build windows + package hcsshim import ( diff --git a/vendor/github.com/Microsoft/hcsshim/internal/cni/doc.go b/vendor/github.com/Microsoft/hcsshim/internal/cni/doc.go new file mode 100644 index 0000000000000..b94015b5aabb1 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/cni/doc.go @@ -0,0 +1 @@ +package cni diff --git a/vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go b/vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go index 4a4fcea843f1c..3543a590d073a 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go @@ -1,3 +1,5 @@ +//go:build windows + package cni import ( @@ -84,7 +86,7 @@ func (pnc *PersistedNamespaceConfig) Store() error { } // Remove removes any persisted state associated with this config. If the config -// is not found in the registery `Remove` returns no error. +// is not found in the registry `Remove` returns no error. 
func (pnc *PersistedNamespaceConfig) Remove() error { if pnc.stored { sk, err := regstate.Open(cniRoot, false) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go b/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go index f46af33bb6506..b60cd383b6218 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/cow/cow.go @@ -1,3 +1,5 @@ +//go:build windows + package cow import ( diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go index d13772b0301eb..7b27173c3a93b 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/callback.go @@ -1,3 +1,5 @@ +//go:build windows + package hcs import ( diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/doc.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/doc.go new file mode 100644 index 0000000000000..d792dda9861e8 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/doc.go @@ -0,0 +1 @@ +package hcs diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go index 295d4b849c3ec..3e10f5c7e03d6 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/errors.go @@ -1,3 +1,5 @@ +//go:build windows + package hcs import ( @@ -51,6 +53,9 @@ var ( // ErrUnexpectedValue is an error encountered when hcs returns an invalid value ErrUnexpectedValue = errors.New("unexpected value returned from hcs") + // ErrOperationDenied is an error when hcs attempts an operation that is explicitly denied + ErrOperationDenied = errors.New("operation denied") + // ErrVmcomputeAlreadyStopped is an error encountered when a shutdown or terminate request is made on a stopped container ErrVmcomputeAlreadyStopped = syscall.Errno(0xc0370110) @@ -82,7 +87,7 @@ var ( // ErrProcessAlreadyStopped is returned by hcs if the process we're trying to kill has already been stopped. ErrProcessAlreadyStopped = syscall.Errno(0x8037011f) - // ErrInvalidHandle is an error that can be encountrered when querying the properties of a compute system when the handle to that + // ErrInvalidHandle is an error that can be encountered when querying the properties of a compute system when the handle to that // compute system has already been closed. ErrInvalidHandle = syscall.Errno(0x6) ) @@ -152,33 +157,38 @@ func (e *HcsError) Error() string { return s } +func (e *HcsError) Is(target error) bool { + return errors.Is(e.Err, target) +} + +// unwrap isnt really needed, but helpful convince function + +func (e *HcsError) Unwrap() error { + return e.Err +} + +// Deprecated: net.Error.Temporary is deprecated. 
func (e *HcsError) Temporary() bool { - err, ok := e.Err.(net.Error) - return ok && err.Temporary() //nolint:staticcheck + err := e.netError() + return (err != nil) && err.Temporary() } func (e *HcsError) Timeout() bool { - err, ok := e.Err.(net.Error) - return ok && err.Timeout() + err := e.netError() + return (err != nil) && err.Timeout() } -// ProcessError is an error encountered in HCS during an operation on a Process object -type ProcessError struct { - SystemID string - Pid int - Op string - Err error - Events []ErrorEvent +func (e *HcsError) netError() (err net.Error) { + if errors.As(e.Unwrap(), &err) { + return err + } + return nil } -var _ net.Error = &ProcessError{} - // SystemError is an error encountered in HCS during an operation on a Container object type SystemError struct { - ID string - Op string - Err error - Events []ErrorEvent + HcsError + ID string } var _ net.Error = &SystemError{} @@ -191,29 +201,32 @@ func (e *SystemError) Error() string { return s } -func (e *SystemError) Temporary() bool { - err, ok := e.Err.(net.Error) - return ok && err.Temporary() //nolint:staticcheck -} - -func (e *SystemError) Timeout() bool { - err, ok := e.Err.(net.Error) - return ok && err.Timeout() -} - func makeSystemError(system *System, op string, err error, events []ErrorEvent) error { // Don't double wrap errors - if _, ok := err.(*SystemError); ok { + var e *SystemError + if errors.As(err, &e) { return err } + return &SystemError{ - ID: system.ID(), - Op: op, - Err: err, - Events: events, + ID: system.ID(), + HcsError: HcsError{ + Op: op, + Err: err, + Events: events, + }, } } +// ProcessError is an error encountered in HCS during an operation on a Process object +type ProcessError struct { + HcsError + SystemID string + Pid int +} + +var _ net.Error = &ProcessError{} + func (e *ProcessError) Error() string { s := fmt.Sprintf("%s %s:%d: %s", e.Op, e.SystemID, e.Pid, e.Err.Error()) for _, ev := range e.Events { @@ -222,27 +235,20 @@ func (e *ProcessError) Error() string { return s } -func (e *ProcessError) Temporary() bool { - err, ok := e.Err.(net.Error) - return ok && err.Temporary() //nolint:staticcheck -} - -func (e *ProcessError) Timeout() bool { - err, ok := e.Err.(net.Error) - return ok && err.Timeout() -} - func makeProcessError(process *Process, op string, err error, events []ErrorEvent) error { // Don't double wrap errors - if _, ok := err.(*ProcessError); ok { + var e *ProcessError + if errors.As(err, &e) { return err } return &ProcessError{ Pid: process.Pid(), SystemID: process.SystemID(), - Op: op, - Err: err, - Events: events, + HcsError: HcsError{ + Op: op, + Err: err, + Events: events, + }, } } @@ -251,41 +257,41 @@ func makeProcessError(process *Process, op string, err error, events []ErrorEven // already exited, or does not exist. Both IsAlreadyStopped and IsNotExist // will currently return true when the error is ErrElementNotFound. func IsNotExist(err error) bool { - err = getInnerError(err) - return err == ErrComputeSystemDoesNotExist || - err == ErrElementNotFound + return IsAny(err, ErrComputeSystemDoesNotExist, ErrElementNotFound) } // IsErrorInvalidHandle checks whether the error is the result of an operation carried // out on a handle that is invalid/closed. This error popped up while trying to query // stats on a container in the process of being stopped. 
func IsErrorInvalidHandle(err error) bool { - err = getInnerError(err) - return err == ErrInvalidHandle + return errors.Is(err, ErrInvalidHandle) } // IsAlreadyClosed checks if an error is caused by the Container or Process having been // already closed by a call to the Close() method. func IsAlreadyClosed(err error) bool { - err = getInnerError(err) - return err == ErrAlreadyClosed + return errors.Is(err, ErrAlreadyClosed) } // IsPending returns a boolean indicating whether the error is that // the requested operation is being completed in the background. func IsPending(err error) bool { - err = getInnerError(err) - return err == ErrVmcomputeOperationPending + return errors.Is(err, ErrVmcomputeOperationPending) } // IsTimeout returns a boolean indicating whether the error is caused by // a timeout waiting for the operation to complete. func IsTimeout(err error) bool { - if err, ok := err.(net.Error); ok && err.Timeout() { + // HcsError and co. implement Timeout regardless of whether the errors they wrap do, + // so `errors.As(err, net.Error)`` will always be true. + // Using `errors.As(err.Unwrap(), net.Err)` wont work for general errors. + // So first check if there an `ErrTimeout` in the chain, then convert to a net error. + if errors.Is(err, ErrTimeout) { return true } - err = getInnerError(err) - return err == ErrTimeout + + var nerr net.Error + return errors.As(err, &nerr) && nerr.Timeout() } // IsAlreadyStopped returns a boolean indicating whether the error is caused by @@ -294,10 +300,7 @@ func IsTimeout(err error) bool { // already exited, or does not exist. Both IsAlreadyStopped and IsNotExist // will currently return true when the error is ErrElementNotFound. func IsAlreadyStopped(err error) bool { - err = getInnerError(err) - return err == ErrVmcomputeAlreadyStopped || - err == ErrProcessAlreadyStopped || - err == ErrElementNotFound + return IsAny(err, ErrVmcomputeAlreadyStopped, ErrProcessAlreadyStopped, ErrElementNotFound) } // IsNotSupported returns a boolean indicating whether the error is caused by @@ -306,38 +309,28 @@ func IsAlreadyStopped(err error) bool { // ErrVmcomputeInvalidJSON, ErrInvalidData, ErrNotSupported or ErrVmcomputeUnknownMessage // is thrown from the Platform func IsNotSupported(err error) bool { - err = getInnerError(err) // If Platform doesn't recognize or support the request sent, below errors are seen - return err == ErrVmcomputeInvalidJSON || - err == ErrInvalidData || - err == ErrNotSupported || - err == ErrVmcomputeUnknownMessage + return IsAny(err, ErrVmcomputeInvalidJSON, ErrInvalidData, ErrNotSupported, ErrVmcomputeUnknownMessage) } // IsOperationInvalidState returns true when err is caused by // `ErrVmcomputeOperationInvalidState`. func IsOperationInvalidState(err error) bool { - err = getInnerError(err) - return err == ErrVmcomputeOperationInvalidState + return errors.Is(err, ErrVmcomputeOperationInvalidState) } // IsAccessIsDenied returns true when err is caused by // `ErrVmcomputeOperationAccessIsDenied`. func IsAccessIsDenied(err error) bool { - err = getInnerError(err) - return err == ErrVmcomputeOperationAccessIsDenied + return errors.Is(err, ErrVmcomputeOperationAccessIsDenied) } -func getInnerError(err error) error { - switch pe := err.(type) { - case nil: - return nil - case *HcsError: - err = pe.Err - case *SystemError: - err = pe.Err - case *ProcessError: - err = pe.Err +// IsAny is a vectorized version of [errors.Is], it returns true if err is one of targets. 
+func IsAny(err error, targets ...error) bool { + for _, e := range targets { + if errors.Is(err, e) { + return true + } } - return err + return false } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go index 78490d6cdd220..e437e297c0d91 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go @@ -1,3 +1,5 @@ +//go:build windows + package hcs import ( @@ -10,6 +12,7 @@ import ( "syscall" "time" + "github.com/Microsoft/hcsshim/internal/cow" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/oc" "github.com/Microsoft/hcsshim/internal/vmcompute" @@ -36,6 +39,8 @@ type Process struct { waitError error } +var _ cow.Process = &Process{} + func newProcess(process vmcompute.HcsProcess, processID int, computeSystem *System) *Process { return &Process{ handle: process, @@ -89,10 +94,7 @@ func (process *Process) processSignalResult(ctx context.Context, err error) (boo case nil: return true, nil case ErrVmcomputeOperationInvalidState, ErrComputeSystemDoesNotExist, ErrElementNotFound: - select { - case <-process.waitBlock: - // The process exit notification has already arrived. - default: + if !process.stopped() { // The process should be gone, but we have not received the notification. // After a second, force unblock the process wait to work around a possible // deadlock in the HCS. @@ -114,9 +116,9 @@ func (process *Process) processSignalResult(ctx context.Context, err error) (boo // Signal signals the process with `options`. // -// For LCOW `guestrequest.SignalProcessOptionsLCOW`. +// For LCOW `guestresource.SignalProcessOptionsLCOW`. // -// For WCOW `guestrequest.SignalProcessOptionsWCOW`. +// For WCOW `guestresource.SignalProcessOptionsWCOW`. func (process *Process) Signal(ctx context.Context, options interface{}) (bool, error) { process.handleLock.RLock() defer process.handleLock.RUnlock() @@ -152,6 +154,10 @@ func (process *Process) Kill(ctx context.Context) (bool, error) { return false, makeProcessError(process, operation, ErrAlreadyClosed, nil) } + if process.stopped() { + return false, makeProcessError(process, operation, ErrProcessAlreadyStopped, nil) + } + if process.killSignalDelivered { // A kill signal has already been sent to this process. Sending a second // one offers no real benefit, as processes cannot stop themselves from @@ -233,7 +239,7 @@ func (process *Process) Kill(ctx context.Context) (bool, error) { // call multiple times. 
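The errors.go rework above replaces the old getInnerError type switch with standard error wrapping: HcsError gains Unwrap and Is, SystemError and ProcessError embed HcsError so those methods are promoted, and the Is* predicates go through errors.Is or the new IsAny helper. A minimal, self-contained sketch of why that works; the type and sentinel names below are stand-ins, not the real ones:

package main

import (
	"errors"
	"fmt"
)

// Stand-in sentinel. The real package matches syscall.Errno sentinels such as
// ErrVmcomputeAlreadyStopped the same way, because errors.Is falls back to ==.
var errAlreadyStopped = errors.New("already stopped")

// hcsError mirrors the role HcsError now plays as an embedded base: it owns the
// wrapped error and exposes it to the errors package via Unwrap.
type hcsError struct {
	Op  string
	Err error
}

func (e *hcsError) Error() string { return e.Op + ": " + e.Err.Error() }
func (e *hcsError) Unwrap() error { return e.Err }

// systemError mirrors SystemError after the refactor: the base is embedded, so
// Unwrap (and hence errors.Is / errors.As) is promoted automatically.
type systemError struct {
	hcsError
	ID string
}

// isAny mirrors the IsAny helper added above: errors.Is against several targets.
func isAny(err error, targets ...error) bool {
	for _, t := range targets {
		if errors.Is(err, t) {
			return true
		}
	}
	return false
}

func main() {
	var err error = &systemError{
		hcsError: hcsError{Op: "hcs::System::Shutdown", Err: errAlreadyStopped},
		ID:       "container-1",
	}
	fmt.Println(errors.Is(err, errAlreadyStopped))                  // true, via the promoted Unwrap
	fmt.Println(isAny(err, errors.New("other"), errAlreadyStopped)) // true
}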
func (process *Process) waitBackground() { operation := "hcs::Process::waitBackground" - ctx, span := trace.StartSpan(context.Background(), operation) + ctx, span := oc.StartSpan(context.Background(), operation) defer span.End() span.AddAttributes( trace.StringAttribute("cid", process.SystemID()), @@ -259,12 +265,12 @@ func (process *Process) waitBackground() { propertiesJSON, resultJSON, err = vmcompute.HcsGetProcessProperties(ctx, process.handle) events := processHcsResult(ctx, resultJSON) if err != nil { - err = makeProcessError(process, operation, err, events) //nolint:ineffassign + err = makeProcessError(process, operation, err, events) } else { properties := &processStatus{} err = json.Unmarshal([]byte(propertiesJSON), properties) if err != nil { - err = makeProcessError(process, operation, err, nil) //nolint:ineffassign + err = makeProcessError(process, operation, err, nil) } else { if properties.LastWaitResult != 0 { log.G(ctx).WithField("wait-result", properties.LastWaitResult).Warning("non-zero last wait result") @@ -286,12 +292,22 @@ func (process *Process) waitBackground() { } // Wait waits for the process to exit. If the process has already exited returns -// the pervious error (if any). +// the previous error (if any). func (process *Process) Wait() error { <-process.waitBlock return process.waitError } +// Exited returns if the process has stopped +func (process *Process) stopped() bool { + select { + case <-process.waitBlock: + return true + default: + return false + } +} + // ResizeConsole resizes the console of the process. func (process *Process) ResizeConsole(ctx context.Context, width, height uint16) error { process.handleLock.RLock() @@ -328,15 +344,13 @@ func (process *Process) ResizeConsole(ctx context.Context, width, height uint16) // ExitCode returns the exit code of the process. The process must have // already terminated. func (process *Process) ExitCode() (int, error) { - select { - case <-process.waitBlock: - if process.waitError != nil { - return -1, process.waitError - } - return process.exitCode, nil - default: + if !process.stopped() { return -1, makeProcessError(process, "hcs::Process::ExitCode", ErrInvalidProcessState, nil) } + if process.waitError != nil { + return -1, process.waitError + } + return process.exitCode, nil } // StdioLegacy returns the stdin, stdout, and stderr pipes, respectively. Closing @@ -344,7 +358,7 @@ func (process *Process) ExitCode() (int, error) { // are the responsibility of the caller to close. func (process *Process) StdioLegacy() (_ io.WriteCloser, _ io.ReadCloser, _ io.ReadCloser, err error) { operation := "hcs::Process::StdioLegacy" - ctx, span := trace.StartSpan(context.Background(), operation) + ctx, span := oc.StartSpan(context.Background(), operation) defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes( @@ -382,7 +396,7 @@ func (process *Process) StdioLegacy() (_ io.WriteCloser, _ io.ReadCloser, _ io.R } // Stdio returns the stdin, stdout, and stderr pipes, respectively. -// To close them, close the process handle. +// To close them, close the process handle, or use the `CloseStd*` functions. func (process *Process) Stdio() (stdin io.Writer, stdout, stderr io.Reader) { process.stdioLock.Lock() defer process.stdioLock.Unlock() @@ -391,46 +405,57 @@ func (process *Process) Stdio() (stdin io.Writer, stdout, stderr io.Reader) { // CloseStdin closes the write side of the stdin pipe so that the process is // notified on the read side that there is no more data in stdin. 
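The new stopped() helper above is the classic non-blocking check for a closed channel: waitBackground closes waitBlock when the process exits, and a select with a default branch reads that state without waiting. A tiny runnable sketch of the same pattern (stand-in type; the real struct also records the exit code and wait error):

package main

import "fmt"

type proc struct {
	waitBlock chan struct{} // closed by the background waiter when the process exits
}

// stopped reports whether waitBlock has already been closed, without blocking.
func (p *proc) stopped() bool {
	select {
	case <-p.waitBlock:
		return true
	default:
		return false
	}
}

func main() {
	p := &proc{waitBlock: make(chan struct{})}
	fmt.Println(p.stopped()) // false: still running
	close(p.waitBlock)       // what waitBackground effectively does on exit
	fmt.Println(p.stopped()) // true: ExitCode / Kill / CloseStdin can take the fast path
}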
-func (process *Process) CloseStdin(ctx context.Context) error { +func (process *Process) CloseStdin(ctx context.Context) (err error) { + operation := "hcs::Process::CloseStdin" + ctx, span := trace.StartSpan(ctx, operation) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes( + trace.StringAttribute("cid", process.SystemID()), + trace.Int64Attribute("pid", int64(process.processID))) + process.handleLock.RLock() defer process.handleLock.RUnlock() - operation := "hcs::Process::CloseStdin" - if process.handle == 0 { return makeProcessError(process, operation, ErrAlreadyClosed, nil) } - modifyRequest := processModifyRequest{ - Operation: modifyCloseHandle, - CloseHandle: &closeHandle{ - Handle: stdIn, - }, + process.stdioLock.Lock() + defer process.stdioLock.Unlock() + if process.stdin == nil { + return nil } - modifyRequestb, err := json.Marshal(modifyRequest) - if err != nil { - return err - } + //HcsModifyProcess request to close stdin will fail if the process has already exited + if !process.stopped() { + modifyRequest := processModifyRequest{ + Operation: modifyCloseHandle, + CloseHandle: &closeHandle{ + Handle: stdIn, + }, + } - resultJSON, err := vmcompute.HcsModifyProcess(ctx, process.handle, string(modifyRequestb)) - events := processHcsResult(ctx, resultJSON) - if err != nil { - return makeProcessError(process, operation, err, events) - } + modifyRequestb, err := json.Marshal(modifyRequest) + if err != nil { + return err + } - process.stdioLock.Lock() - if process.stdin != nil { - process.stdin.Close() - process.stdin = nil + resultJSON, err := vmcompute.HcsModifyProcess(ctx, process.handle, string(modifyRequestb)) + events := processHcsResult(ctx, resultJSON) + if err != nil { + return makeProcessError(process, operation, err, events) + } } - process.stdioLock.Unlock() + + process.stdin.Close() + process.stdin = nil return nil } func (process *Process) CloseStdout(ctx context.Context) (err error) { - ctx, span := trace.StartSpan(ctx, "hcs::Process::CloseStdout") //nolint:ineffassign,staticcheck + ctx, span := oc.StartSpan(ctx, "hcs::Process::CloseStdout") //nolint:ineffassign,staticcheck defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes( @@ -454,7 +479,7 @@ func (process *Process) CloseStdout(ctx context.Context) (err error) { } func (process *Process) CloseStderr(ctx context.Context) (err error) { - ctx, span := trace.StartSpan(ctx, "hcs::Process::CloseStderr") //nolint:ineffassign,staticcheck + ctx, span := oc.StartSpan(ctx, "hcs::Process::CloseStderr") //nolint:ineffassign,staticcheck defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes( @@ -473,7 +498,6 @@ func (process *Process) CloseStderr(ctx context.Context) (err error) { if process.stderr != nil { process.stderr.Close() process.stderr = nil - } return nil } @@ -482,7 +506,7 @@ func (process *Process) CloseStderr(ctx context.Context) (err error) { // or wait on it. 
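CloseStdin above also picks up the tracing decoration used elsewhere in this file: a named err result plus a deferred status setter, so every return path is recorded on the span before it ends. A compilable sketch of that pattern using go.opencensus.io/trace directly; setSpanStatus stands in for the internal oc.SetSpanStatus helper, and the real method body is elided:

package main

import (
	"context"
	"errors"
	"fmt"

	"go.opencensus.io/trace"
)

// setSpanStatus records a non-OK status on the span when the operation failed.
func setSpanStatus(span *trace.Span, err error) {
	if err != nil {
		span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()})
	}
}

// closeStdin shows the decoration pattern: a *named* error result plus a deferred
// status setter, so whichever return path assigns err, the span records it.
func closeStdin(ctx context.Context) (err error) {
	_, span := trace.StartSpan(ctx, "hcs::Process::CloseStdin")
	defer span.End()
	defer func() { setSpanStatus(span, err) }() // evaluated with the final value of err
	span.AddAttributes(trace.StringAttribute("cid", "example-container"))

	// ... the real method guards on the handle, skips HcsModifyProcess when the
	// process has already stopped, and closes the local stdin pipe here ...
	return errors.New("example failure recorded on the span")
}

func main() {
	fmt.Println(closeStdin(context.Background()))
}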
func (process *Process) Close() (err error) { operation := "hcs::Process::Close" - ctx, span := trace.StartSpan(context.Background(), operation) + ctx, span := oc.StartSpan(context.Background(), operation) defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes( diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go index b621c55938836..d1f219cfad006 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema1/schema1.go @@ -1,3 +1,5 @@ +//go:build windows + package schema1 import ( @@ -101,7 +103,7 @@ type ContainerConfig struct { HvRuntime *HvRuntime `json:",omitempty"` // Hyper-V container settings. Used by Hyper-V containers only. Format ImagePath=%root%\BaseLayerID\UtilityVM Servicing bool `json:",omitempty"` // True if this container is for servicing AllowUnqualifiedDNSQuery bool `json:",omitempty"` // True to allow unqualified DNS name resolution - DNSSearchList string `json:",omitempty"` // Comma seperated list of DNS suffixes to use for name resolution + DNSSearchList string `json:",omitempty"` // Comma separated list of DNS suffixes to use for name resolution ContainerType string `json:",omitempty"` // "Linux" for Linux containers on Windows. Omitted otherwise. TerminateOnLastHandleClosed bool `json:",omitempty"` // Should HCS terminate the container once all handles have been closed MappedVirtualDisks []MappedVirtualDisk `json:",omitempty"` // Array of virtual disks to mount at start diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go index bbad6a2c450ab..31fe07c3aa770 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cpu_group_property.go @@ -9,6 +9,14 @@ package hcsschema +type CPUGroupPropertyCode uint32 + +const ( + CPUCapacityProperty = 0x00010000 + CPUSchedulingPriorityProperty = 0x00020000 + IdleLPReserveProperty = 0x00030000 +) + type CpuGroupProperty struct { PropertyCode uint32 `json:"PropertyCode,omitempty"` PropertyValue uint32 `json:"PropertyValue,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/debug_options.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/debug_options.go new file mode 100644 index 0000000000000..5385850fe42c0 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/debug_options.go @@ -0,0 +1,22 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type DebugOptions struct { + // BugcheckSavedStateFileName is the path for the file in which the guest VM state will be saved when + // the guest crashes. + BugcheckSavedStateFileName string `json:"BugcheckSavedStateFileName,omitempty"` + // BugcheckNoCrashdumpSavedStateFileName is the path of the file in which the guest VM state will be + // saved when the guest crashes but the guest isn't able to generate the crash dump. This usually + // happens in early boot failures. 
+ BugcheckNoCrashdumpSavedStateFileName string `json:"BugcheckNoCrashdumpSavedStateFileName,omitempty"` + TripleFaultSavedStateFileName string `json:"TripleFaultSavedStateFileName,omitempty"` + FirmwareDumpFileName string `json:"FirmwareDumpFileName,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go index ef1eec88656fd..a48a6539456b5 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/guest_state.go @@ -14,6 +14,9 @@ type GuestState struct { // The path to an existing file uses for persistent guest state storage. An empty string indicates the system should initialize new transient, in-memory guest state. GuestStateFilePath string `json:"GuestStateFilePath,omitempty"` + // The guest state file type affected by different guest isolation modes - whether a file or block storage. + GuestStateFileType string `json:"GuestStateFileType,omitempty"` + // The path to an existing file for persistent runtime state storage. An empty string indicates the system should initialize new transient, in-memory runtime state. RuntimeStateFilePath string `json:"RuntimeStateFilePath,omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/isolation_settings.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/isolation_settings.go new file mode 100644 index 0000000000000..3726a297e1272 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/isolation_settings.go @@ -0,0 +1,21 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type IsolationSettings struct { + // Guest isolation type options to decide virtual trust levels of virtual machine + IsolationType string `json:"IsolationType,omitempty"` + // Configuration to debug HCL layer for HCS VM TODO: Task 31102306: Miss the way to prevent the exposure of private debug configuration in HCS TODO: Think about the secret configurations which are private in VMMS VM (only edit by hvsedit) + DebugHost string `json:"DebugHost,omitempty"` + DebugPort int64 `json:"DebugPort,omitempty"` + // Optional data passed by host on isolated virtual machine start + LaunchData string `json:"LaunchData,omitempty"` + HclEnabled bool `json:"HclEnabled,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go index d29455a3e4345..6364da8e2374b 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/modify_setting_request.go @@ -9,10 +9,12 @@ package hcsschema +import "github.com/Microsoft/hcsshim/internal/protocol/guestrequest" + type ModifySettingRequest struct { ResourcePath string `json:"ResourcePath,omitempty"` - RequestType string `json:"RequestType,omitempty"` + RequestType guestrequest.RequestType `json:"RequestType,omitempty"` // NOTE: Swagger generated as string. Locally updated. Settings interface{} `json:"Settings,omitempty"` // NOTE: Swagger generated as *interface{}. 
Locally updated diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/security_settings.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/security_settings.go new file mode 100644 index 0000000000000..14f0299e32e0a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/security_settings.go @@ -0,0 +1,16 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.4 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type SecuritySettings struct { + // Enablement of Trusted Platform Module on the computer system + EnableTpm bool `json:"EnableTpm,omitempty"` + Isolation *IsolationSettings `json:"Isolation,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/system_time.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/system_time.go new file mode 100644 index 0000000000000..72de8014930c8 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/system_time.go @@ -0,0 +1,28 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type SystemTime struct { + Year int32 `json:"Year,omitempty"` + + Month int32 `json:"Month,omitempty"` + + DayOfWeek int32 `json:"DayOfWeek,omitempty"` + + Day int32 `json:"Day,omitempty"` + + Hour int32 `json:"Hour,omitempty"` + + Minute int32 `json:"Minute,omitempty"` + + Second int32 `json:"Second,omitempty"` + + Milliseconds int32 `json:"Milliseconds,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/time_zone_information.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/time_zone_information.go new file mode 100644 index 0000000000000..529743d753254 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/time_zone_information.go @@ -0,0 +1,26 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.1 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +type TimeZoneInformation struct { + Bias int32 `json:"Bias,omitempty"` + + StandardName string `json:"StandardName,omitempty"` + + StandardDate *SystemTime `json:"StandardDate,omitempty"` + + StandardBias int32 `json:"StandardBias,omitempty"` + + DaylightName string `json:"DaylightName,omitempty"` + + DaylightDate *SystemTime `json:"DaylightDate,omitempty"` + + DaylightBias int32 `json:"DaylightBias,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go index 0e48ece500c61..9228923fe4c06 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/uefi.go @@ -12,6 +12,8 @@ package hcsschema type Uefi struct { EnableDebugger bool `json:"EnableDebugger,omitempty"` + ApplySecureBootTemplate string `json:"ApplySecureBootTemplate,omitempty"` + SecureBootTemplateId string `json:"SecureBootTemplateId,omitempty"` BootThis *UefiBootEntry `json:"BootThis,omitempty"` diff --git 
a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go index 2d22b1bcb0843..1e0fab2890c4a 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/virtual_machine.go @@ -29,4 +29,8 @@ type VirtualMachine struct { StorageQoS *StorageQoS `json:"StorageQoS,omitempty"` GuestConnection *GuestConnection `json:"GuestConnection,omitempty"` + + SecuritySettings *SecuritySettings `json:"SecuritySettings,omitempty"` + + DebugOptions *DebugOptions `json:"DebugOptions,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go index a634dfc151587..a46b0051df47e 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/service.go @@ -1,3 +1,5 @@ +//go:build windows + package hcs import ( diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go index a76f6b253e2d4..cf20adefc93ed 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go @@ -1,3 +1,5 @@ +//go:build windows + package hcs import ( @@ -37,6 +39,9 @@ type System struct { startTime time.Time } +var _ cow.Container = &System{} +var _ cow.ProcessHost = &System{} + func newSystem(id string) *System { return &System{ id: id, @@ -55,7 +60,7 @@ func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface in // hcsCreateComputeSystemContext is an async operation. Start the outer span // here to measure the full create time. - ctx, span := trace.StartSpan(ctx, operation) + ctx, span := oc.StartSpan(ctx, operation) defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes(trace.StringAttribute("cid", id)) @@ -89,7 +94,8 @@ func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface in } } - events, err := processAsyncHcsResult(ctx, createError, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemCreateCompleted, &timeout.SystemCreate) + events, err := processAsyncHcsResult(ctx, createError, resultJSON, computeSystem.callbackNumber, + hcsNotificationSystemCreateCompleted, &timeout.SystemCreate) if err != nil { if err == ErrTimeout { // Terminate the compute system if it still exists. We're okay to @@ -190,7 +196,7 @@ func (computeSystem *System) Start(ctx context.Context) (err error) { // hcsStartComputeSystemContext is an async operation. Start the outer span // here to measure the full start time. 
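The schema additions above (SecuritySettings, IsolationSettings, DebugOptions and the new VirtualMachine fields) are plain JSON-tagged structs that end up in the HCS document. A rough sketch of how a caller might populate and serialize them; the struct definitions below are trimmed copies of the ones in the diff, and the field values are placeholders rather than known-good HCS settings:

package main

import (
	"encoding/json"
	"fmt"
)

type IsolationSettings struct {
	IsolationType string `json:"IsolationType,omitempty"` // placeholder value below, not a verified HCS string
	LaunchData    string `json:"LaunchData,omitempty"`
	HclEnabled    bool   `json:"HclEnabled,omitempty"`
}

type SecuritySettings struct {
	EnableTpm bool               `json:"EnableTpm,omitempty"`
	Isolation *IsolationSettings `json:"Isolation,omitempty"`
}

type DebugOptions struct {
	BugcheckSavedStateFileName string `json:"BugcheckSavedStateFileName,omitempty"`
}

type VirtualMachine struct {
	SecuritySettings *SecuritySettings `json:"SecuritySettings,omitempty"`
	DebugOptions     *DebugOptions     `json:"DebugOptions,omitempty"`
}

func main() {
	vm := VirtualMachine{
		SecuritySettings: &SecuritySettings{
			EnableTpm: true,
			Isolation: &IsolationSettings{IsolationType: "GuestStateOnly", HclEnabled: true},
		},
		DebugOptions: &DebugOptions{BugcheckSavedStateFileName: `C:\crash\bugcheck.vmrs`},
	}
	b, _ := json.MarshalIndent(vm, "", "  ")
	fmt.Println(string(b)) // omitempty keeps the resulting HCS document minimal
}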
- ctx, span := trace.StartSpan(ctx, operation) + ctx, span := oc.StartSpan(ctx, operation) defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) @@ -198,12 +204,15 @@ func (computeSystem *System) Start(ctx context.Context) (err error) { computeSystem.handleLock.RLock() defer computeSystem.handleLock.RUnlock() + // prevent starting an exited system because waitblock we do not recreate waitBlock + // or rerun waitBackground, so we have no way to be notified of it closing again if computeSystem.handle == 0 { return makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) } resultJSON, err := vmcompute.HcsStartComputeSystem(ctx, computeSystem.handle, "") - events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemStartCompleted, &timeout.SystemStart) + events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, + hcsNotificationSystemStartCompleted, &timeout.SystemStart) if err != nil { return makeSystemError(computeSystem, operation, err, events) } @@ -223,7 +232,7 @@ func (computeSystem *System) Shutdown(ctx context.Context) error { operation := "hcs::System::Shutdown" - if computeSystem.handle == 0 { + if computeSystem.handle == 0 || computeSystem.stopped() { return nil } @@ -244,7 +253,7 @@ func (computeSystem *System) Terminate(ctx context.Context) error { operation := "hcs::System::Terminate" - if computeSystem.handle == 0 { + if computeSystem.handle == 0 || computeSystem.stopped() { return nil } @@ -265,7 +274,7 @@ func (computeSystem *System) Terminate(ctx context.Context) error { // safe to call multiple times. func (computeSystem *System) waitBackground() { operation := "hcs::System::waitBackground" - ctx, span := trace.StartSpan(context.Background(), operation) + ctx, span := oc.StartSpan(context.Background(), operation) defer span.End() span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) @@ -302,17 +311,25 @@ func (computeSystem *System) Wait() error { return computeSystem.WaitError() } -// ExitError returns an error describing the reason the compute system terminated. -func (computeSystem *System) ExitError() error { +// stopped returns true if the compute system stopped. +func (computeSystem *System) stopped() bool { select { case <-computeSystem.waitBlock: - if computeSystem.waitError != nil { - return computeSystem.waitError - } - return computeSystem.exitError + return true default: + } + return false +} + +// ExitError returns an error describing the reason the compute system terminated. +func (computeSystem *System) ExitError() error { + if !computeSystem.stopped() { return errors.New("container not exited") } + if computeSystem.waitError != nil { + return computeSystem.waitError + } + return computeSystem.exitError } // Properties returns the requested container properties targeting a V1 schema container. 
@@ -322,6 +339,10 @@ func (computeSystem *System) Properties(ctx context.Context, types ...schema1.Pr operation := "hcs::System::Properties" + if computeSystem.handle == 0 { + return nil, makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } + queryBytes, err := json.Marshal(schema1.PropertyQuery{PropertyTypes: types}) if err != nil { return nil, makeSystemError(computeSystem, operation, err, nil) @@ -349,7 +370,11 @@ func (computeSystem *System) Properties(ctx context.Context, types ...schema1.Pr // failed to be queried they will be tallied up and returned in as the first return value. Failures on // query are NOT considered errors; the only failure case for this method is if the containers job object // cannot be opened. -func (computeSystem *System) queryInProc(ctx context.Context, props *hcsschema.Properties, types []hcsschema.PropertyType) ([]hcsschema.PropertyType, error) { +func (computeSystem *System) queryInProc( + ctx context.Context, + props *hcsschema.Properties, + types []hcsschema.PropertyType, +) ([]hcsschema.PropertyType, error) { // In the future we can make use of some new functionality in the HCS that allows you // to pass a job object for HCS to use for the container. Currently, the only way we'll // be able to open the job/silo is if we're running as SYSTEM. @@ -415,7 +440,7 @@ func (computeSystem *System) statisticsInProc(job *jobobject.JobObject) (*hcssch // as well which isn't great and is wasted work to fetch. // // HCS only let's you grab statistics in an all or nothing fashion, so we can't just grab the private - // working set ourselves and ask for everything else seperately. The optimization we can make here is + // working set ourselves and ask for everything else separately. The optimization we can make here is // to open the silo ourselves and do the same queries for the rest of the info, as well as calculating // the private working set in a more efficient manner by: // @@ -455,6 +480,10 @@ func (computeSystem *System) statisticsInProc(job *jobobject.JobObject) (*hcssch func (computeSystem *System) hcsPropertiesV2Query(ctx context.Context, types []hcsschema.PropertyType) (*hcsschema.Properties, error) { operation := "hcs::System::PropertiesV2" + if computeSystem.handle == 0 { + return nil, makeSystemError(computeSystem, operation, ErrAlreadyClosed, nil) + } + queryBytes, err := json.Marshal(hcsschema.PropertyQuery{PropertyTypes: types}) if err != nil { return nil, makeSystemError(computeSystem, operation, err, nil) @@ -503,7 +532,7 @@ func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschem if err == nil && len(fallbackTypes) == 0 { return properties, nil } else if err != nil { - logEntry.WithError(fmt.Errorf("failed to query compute system properties in-proc: %w", err)) + logEntry = logEntry.WithError(fmt.Errorf("failed to query compute system properties in-proc: %w", err)) fallbackTypes = types } @@ -535,9 +564,9 @@ func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschem func (computeSystem *System) Pause(ctx context.Context) (err error) { operation := "hcs::System::Pause" - // hcsPauseComputeSystemContext is an async peration. Start the outer span + // hcsPauseComputeSystemContext is an async operation. Start the outer span // here to measure the full pause time. 
- ctx, span := trace.StartSpan(ctx, operation) + ctx, span := oc.StartSpan(ctx, operation) defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) @@ -550,7 +579,8 @@ func (computeSystem *System) Pause(ctx context.Context) (err error) { } resultJSON, err := vmcompute.HcsPauseComputeSystem(ctx, computeSystem.handle, "") - events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemPauseCompleted, &timeout.SystemPause) + events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, + hcsNotificationSystemPauseCompleted, &timeout.SystemPause) if err != nil { return makeSystemError(computeSystem, operation, err, events) } @@ -564,7 +594,7 @@ func (computeSystem *System) Resume(ctx context.Context) (err error) { // hcsResumeComputeSystemContext is an async operation. Start the outer span // here to measure the full restore time. - ctx, span := trace.StartSpan(ctx, operation) + ctx, span := oc.StartSpan(ctx, operation) defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) @@ -577,7 +607,8 @@ func (computeSystem *System) Resume(ctx context.Context) (err error) { } resultJSON, err := vmcompute.HcsResumeComputeSystem(ctx, computeSystem.handle, "") - events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, hcsNotificationSystemResumeCompleted, &timeout.SystemResume) + events, err := processAsyncHcsResult(ctx, err, resultJSON, computeSystem.callbackNumber, + hcsNotificationSystemResumeCompleted, &timeout.SystemResume) if err != nil { return makeSystemError(computeSystem, operation, err, events) } @@ -589,9 +620,9 @@ func (computeSystem *System) Resume(ctx context.Context) (err error) { func (computeSystem *System) Save(ctx context.Context, options interface{}) (err error) { operation := "hcs::System::Save" - // hcsSaveComputeSystemContext is an async peration. Start the outer span + // hcsSaveComputeSystemContext is an async operation. Start the outer span // here to measure the full save time. 
- ctx, span := trace.StartSpan(ctx, operation) + ctx, span := oc.StartSpan(ctx, operation) defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) @@ -609,7 +640,8 @@ func (computeSystem *System) Save(ctx context.Context, options interface{}) (err } result, err := vmcompute.HcsSaveComputeSystem(ctx, computeSystem.handle, string(saveOptions)) - events, err := processAsyncHcsResult(ctx, err, result, computeSystem.callbackNumber, hcsNotificationSystemSaveCompleted, &timeout.SystemSave) + events, err := processAsyncHcsResult(ctx, err, result, computeSystem.callbackNumber, + hcsNotificationSystemSaveCompleted, &timeout.SystemSave) if err != nil { return makeSystemError(computeSystem, operation, err, events) } @@ -634,6 +666,11 @@ func (computeSystem *System) createProcess(ctx context.Context, operation string processInfo, processHandle, resultJSON, err := vmcompute.HcsCreateProcess(ctx, computeSystem.handle, configuration) events := processHcsResult(ctx, resultJSON) if err != nil { + if v2, ok := c.(*hcsschema.ProcessParameters); ok { + operation += ": " + v2.CommandLine + } else if v1, ok := c.(*schema1.ProcessConfig); ok { + operation += ": " + v1.CommandLine + } return nil, nil, makeSystemError(computeSystem, operation, err, events) } @@ -700,7 +737,7 @@ func (computeSystem *System) OpenProcess(ctx context.Context, pid int) (*Process // Close cleans up any state associated with the compute system but does not terminate or wait for it. func (computeSystem *System) Close() (err error) { operation := "hcs::System::Close" - ctx, span := trace.StartSpan(context.Background(), operation) + ctx, span := oc.StartSpan(context.Background(), operation) defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes(trace.StringAttribute("cid", computeSystem.id)) @@ -743,7 +780,8 @@ func (computeSystem *System) registerCallback(ctx context.Context) error { callbackMap[callbackNumber] = callbackContext callbackMapLock.Unlock() - callbackHandle, err := vmcompute.HcsRegisterComputeSystemCallback(ctx, computeSystem.handle, notificationWatcherCallback, callbackNumber) + callbackHandle, err := vmcompute.HcsRegisterComputeSystemCallback(ctx, computeSystem.handle, + notificationWatcherCallback, callbackNumber) if err != nil { return err } @@ -770,7 +808,7 @@ func (computeSystem *System) unregisterCallback(ctx context.Context) error { return nil } - // hcsUnregisterComputeSystemCallback has its own syncronization + // hcsUnregisterComputeSystemCallback has its own synchronization // to wait for all callbacks to complete. We must NOT hold the callbackMapLock. 
err := vmcompute.HcsUnregisterComputeSystemCallback(ctx, handle) if err != nil { diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go index 3342e5bb94868..5dcb97eb39887 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/utils.go @@ -1,3 +1,5 @@ +//go:build windows + package hcs import ( diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go index db4e14fdfb29e..3a51ed1955714 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/waithelper.go @@ -1,3 +1,5 @@ +//go:build windows + package hcs import ( @@ -7,7 +9,14 @@ import ( "github.com/Microsoft/hcsshim/internal/log" ) -func processAsyncHcsResult(ctx context.Context, err error, resultJSON string, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) ([]ErrorEvent, error) { +func processAsyncHcsResult( + ctx context.Context, + err error, + resultJSON string, + callbackNumber uintptr, + expectedNotification hcsNotification, + timeout *time.Duration, +) ([]ErrorEvent, error) { events := processHcsResult(ctx, resultJSON) if IsPending(err) { return nil, waitForNotification(ctx, callbackNumber, expectedNotification, timeout) @@ -16,7 +25,12 @@ func processAsyncHcsResult(ctx context.Context, err error, resultJSON string, ca return events, err } -func waitForNotification(ctx context.Context, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error { +func waitForNotification( + ctx context.Context, + callbackNumber uintptr, + expectedNotification hcsNotification, + timeout *time.Duration, +) error { callbackMapLock.RLock() if _, ok := callbackMap[callbackNumber]; !ok { callbackMapLock.RUnlock() diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcserror/doc.go b/vendor/github.com/Microsoft/hcsshim/internal/hcserror/doc.go new file mode 100644 index 0000000000000..ce70676789554 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcserror/doc.go @@ -0,0 +1 @@ +package hcserror diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go b/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go index 921c2c8556c2f..a70d80da075c0 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcserror/hcserror.go @@ -1,11 +1,13 @@ +//go:build windows + package hcserror import ( + "errors" "fmt" - "syscall" -) -const ERROR_GEN_FAILURE = syscall.Errno(31) + "golang.org/x/sys/windows" +) type HcsError struct { title string @@ -30,18 +32,21 @@ func (e *HcsError) Error() string { func New(err error, title, rest string) error { // Pass through DLL errors directly since they do not originate from HCS. 
- if _, ok := err.(*syscall.DLLError); ok { + var e *windows.DLLError + if errors.As(err, &e) { return err } return &HcsError{title, rest, err} } func Win32FromError(err error) uint32 { - if herr, ok := err.(*HcsError); ok { + var herr *HcsError + if errors.As(err, &herr) { return Win32FromError(herr.Err) } - if code, ok := err.(syscall.Errno); ok { + var code windows.Errno + if errors.As(err, &code) { return uint32(code) } - return uint32(ERROR_GEN_FAILURE) + return uint32(windows.ERROR_GEN_FAILURE) } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/doc.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/doc.go new file mode 100644 index 0000000000000..f6d35df0e5303 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/doc.go @@ -0,0 +1 @@ +package hns diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go index b2e475f53c664..ec4c907d1f5d4 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hns.go @@ -2,7 +2,7 @@ package hns import "fmt" -//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go hns.go +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go hns.go //sys _hnsCall(method string, path string, object string, response **uint16) (hr error) = vmcompute.HNSCall? diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go index 7cf954c7b26fa..593664419d5c9 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsendpoint.go @@ -1,3 +1,5 @@ +//go:build windows + package hns import ( @@ -34,7 +36,7 @@ type HNSEndpoint struct { SharedContainers []string `json:",omitempty"` } -//SystemType represents the type of the system on which actions are done +// SystemType represents the type of the system on which actions are done type SystemType string // SystemType const @@ -146,7 +148,6 @@ func (endpoint *HNSEndpoint) IsAttached(vID string) (bool, error) { } return false, nil - } // Create Endpoint by sending EndpointRequest to HNS. 
TODO: Create a separate HNS interface to place all these methods @@ -281,7 +282,6 @@ func (endpoint *HNSEndpoint) HostAttach(compartmentID uint16) error { return err } return hnsCall("POST", "/endpoints/"+endpoint.Id+"/attach", string(jsonString), &response) - } // HostDetach detaches a nic on the host diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go index 2df4a57f56c71..0a8f36d832fe0 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go @@ -1,3 +1,5 @@ +//go:build windows + package hns import ( diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go index a8d8cc56aea8f..464bb8954fb6c 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsglobals.go @@ -1,3 +1,5 @@ +//go:build windows + package hns type HNSGlobals struct { diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go index f12d3ab0411a6..8861faee7a229 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnsnetwork.go @@ -1,13 +1,16 @@ +//go:build windows + package hns import ( "encoding/json" "errors" - "github.com/sirupsen/logrus" "net" + + "github.com/sirupsen/logrus" ) -// Subnet is assoicated with a network and represents a list +// Subnet is associated with a network and represents a list // of subnets available to the network type Subnet struct { AddressPrefix string `json:",omitempty"` @@ -15,7 +18,7 @@ type Subnet struct { Policies []json.RawMessage `json:",omitempty"` } -// MacPool is assoicated with a network and represents a list +// MacPool is associated with a network and represents a list // of macaddresses available to the network type MacPool struct { StartMacAddress string `json:",omitempty"` diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go index 84b3682184745..082c018a4e23f 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go @@ -94,15 +94,15 @@ type ACLPolicy struct { InternalPort uint16 `json:",omitempty"` Action ActionType Direction DirectionType - LocalAddresses string `json:",omitempty"` - RemoteAddresses string `json:",omitempty"` - LocalPorts string `json:"LocalPorts,omitempty"` - LocalPort uint16 `json:",omitempty"` - RemotePorts string `json:"RemotePorts,omitempty"` - RemotePort uint16 `json:",omitempty"` - RuleType RuleType `json:"RuleType,omitempty"` - Priority uint16 `json:",omitempty"` - ServiceName string `json:",omitempty"` + LocalAddresses string `json:",omitempty"` + RemoteAddresses string `json:",omitempty"` + LocalPorts string `json:"LocalPorts,omitempty"` + LocalPort uint16 `json:",omitempty"` + RemotePorts string `json:"RemotePorts,omitempty"` + RemotePort uint16 `json:",omitempty"` + RuleType RuleType `json:"RuleType,omitempty"` + Priority uint16 `json:",omitempty"` + ServiceName string `json:",omitempty"` } type Policy struct { diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go index 31322a68167c5..b98db40e8d347 100644 --- 
a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicylist.go @@ -1,3 +1,5 @@ +//go:build windows + package hns import ( diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go index d5efba7f28410..b9c30b901913b 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnssupport.go @@ -1,3 +1,5 @@ +//go:build windows + package hns import ( diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go index d3b04eefe0cd6..749588ad39ae7 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go @@ -1,3 +1,5 @@ +//go:build windows + package hns import ( diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go index 204633a4887d7..a35ee945dbc7a 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/zsyscall_windows.go @@ -1,4 +1,6 @@ -// Code generated mksyscall_windows.exe DO NOT EDIT +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. package hns @@ -19,6 +21,7 @@ const ( var ( errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL ) // errnoErr returns common boxed Errno values, to prevent @@ -26,7 +29,7 @@ var ( func errnoErr(e syscall.Errno) error { switch e { case 0: - return nil + return errERROR_EINVAL case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } @@ -62,7 +65,8 @@ func _hnsCall(method string, path string, object string, response **uint16) (hr } func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16) (hr error) { - if hr = procHNSCall.Find(); hr != nil { + hr = procHNSCall.Find() + if hr != nil { return } r0, _, _ := syscall.Syscall6(procHNSCall.Addr(), 4, uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)), 0, 0) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/interop/doc.go b/vendor/github.com/Microsoft/hcsshim/internal/interop/doc.go new file mode 100644 index 0000000000000..cb554867fe1d7 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/interop/doc.go @@ -0,0 +1 @@ +package interop diff --git a/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go b/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go index 922f7c679e063..a564696568236 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/interop/interop.go @@ -1,3 +1,5 @@ +//go:build windows + package interop import ( @@ -5,7 +7,7 @@ import ( "unsafe" ) -//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go interop.go +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go interop.go //sys coTaskMemFree(buffer unsafe.Pointer) = api_ms_win_core_com_l1_1_0.CoTaskMemFree diff --git a/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go index 
12b0c71c5ae0e..a17a112508385 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/interop/zsyscall_windows.go @@ -1,4 +1,6 @@ -// Code generated mksyscall_windows.exe DO NOT EDIT +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. package interop @@ -19,6 +21,7 @@ const ( var ( errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL ) // errnoErr returns common boxed Errno values, to prevent @@ -26,7 +29,7 @@ var ( func errnoErr(e syscall.Errno) error { switch e { case 0: - return nil + return errERROR_EINVAL case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/doc.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/doc.go new file mode 100644 index 0000000000000..34b53d6e4842b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/doc.go @@ -0,0 +1,8 @@ +// This package provides higher level constructs for the win32 job object API. +// Most of the core creation and management functions are already present in "golang.org/x/sys/windows" +// (CreateJobObject, AssignProcessToJobObject, etc.) as well as most of the limit information +// structs and associated limit flags. Whatever is not present from the job object API +// in golang.org/x/sys/windows is located in /internal/winapi. +// +// https://docs.microsoft.com/en-us/windows/win32/procthread/job-objects +package jobobject diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go index 5d6acd69e618e..bcca84b0da3ce 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go @@ -1,3 +1,5 @@ +//go:build windows + package jobobject import ( diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go index c9fdd921a7f8b..64afd35dc6326 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go @@ -1,10 +1,15 @@ +//go:build windows + package jobobject import ( "context" "errors" "fmt" + "os" + "path/filepath" "sync" + "sync/atomic" "unsafe" "github.com/Microsoft/hcsshim/internal/queue" @@ -12,19 +17,14 @@ import ( "golang.org/x/sys/windows" ) -// This file provides higher level constructs for the win32 job object API. -// Most of the core creation and management functions are already present in "golang.org/x/sys/windows" -// (CreateJobObject, AssignProcessToJobObject, etc.) as well as most of the limit information -// structs and associated limit flags. Whatever is not present from the job object API -// in golang.org/x/sys/windows is located in /internal/winapi. -// -// https://docs.microsoft.com/en-us/windows/win32/procthread/job-objects - // JobObject is a high level wrapper around a Windows job object. Holds a handle to // the job, a queue to receive iocp notifications about the lifecycle // of the job and a mutex for synchronized handle access. type JobObject struct { - handle windows.Handle + handle windows.Handle + // All accesses to this MUST be done atomically except in `Open` as the object + // is being created in the function. 1 signifies that this job is currently a silo. 
+ silo uint32 mq *queue.MessageQueue handleLock sync.RWMutex } @@ -56,6 +56,7 @@ const ( var ( ErrAlreadyClosed = errors.New("the handle has already been closed") ErrNotRegistered = errors.New("job is not registered to receive notifications") + ErrNotSilo = errors.New("job is not a silo") ) // Options represents the set of configurable options when making or opening a job object. @@ -68,6 +69,9 @@ type Options struct { // `UseNTVariant` specifies if we should use the `Nt` variant of Open/CreateJobObject. // Defaults to false. UseNTVariant bool + // `Silo` specifies to promote the job to a silo. This additionally sets the flag + // JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE as it is required for the upgrade to complete. + Silo bool // `IOTracking` enables tracking I/O statistics on the job object. More specifically this // calls SetInformationJobObject with the JobObjectIoAttribution class. EnableIOTracking bool @@ -143,6 +147,16 @@ func Create(ctx context.Context, options *Options) (_ *JobObject, err error) { } } + if options.Silo { + // This is a required setting for upgrading to a silo. + if err := job.SetTerminateOnLastHandleClose(); err != nil { + return nil, err + } + if err := job.PromoteToSilo(); err != nil { + return nil, err + } + } + return job, nil } @@ -163,7 +177,7 @@ func Open(ctx context.Context, options *Options) (_ *JobObject, err error) { } var jobHandle windows.Handle - if options != nil && options.UseNTVariant { + if options.UseNTVariant { oa := winapi.ObjectAttributes{ Length: unsafe.Sizeof(winapi.ObjectAttributes{}), ObjectName: unicodeJobName, @@ -174,7 +188,7 @@ func Open(ctx context.Context, options *Options) (_ *JobObject, err error) { return nil, winapi.RtlNtStatusToDosError(status) } } else { - jobHandle, err = winapi.OpenJobObject(winapi.JOB_OBJECT_ALL_ACCESS, false, unicodeJobName.Buffer) + jobHandle, err = winapi.OpenJobObject(winapi.JOB_OBJECT_ALL_ACCESS, 0, unicodeJobName.Buffer) if err != nil { return nil, err } @@ -190,9 +204,13 @@ func Open(ctx context.Context, options *Options) (_ *JobObject, err error) { handle: jobHandle, } + if isJobSilo(jobHandle) { + job.silo = 1 + } + // If the IOCP we'll be using to receive messages for all jobs hasn't been // created, create it and start polling. - if options != nil && options.Notifications { + if options.Notifications { mq, err := setupNotifications(ctx, job) if err != nil { return nil, err @@ -450,6 +468,119 @@ func (job *JobObject) QueryStorageStats() (*winapi.JOBOBJECT_IO_ATTRIBUTION_INFO return &info, nil } +// ApplyFileBinding makes a file binding using the Bind Filter from target to root. If the job has +// not been upgraded to a silo this call will fail. The binding is only applied and visible for processes +// running in the job, any processes on the host or in another job will not be able to see the binding. +func (job *JobObject) ApplyFileBinding(root, target string, readOnly bool) error { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return ErrAlreadyClosed + } + + if !job.isSilo() { + return ErrNotSilo + } + + // The parent directory needs to exist for the bind to work. MkdirAll stats and + // returns nil if the directory exists internally so we should be fine to mkdirall + // every time. 
+ if err := os.MkdirAll(filepath.Dir(root), 0); err != nil { + return err + } + + rootPtr, err := windows.UTF16PtrFromString(root) + if err != nil { + return err + } + + targetPtr, err := windows.UTF16PtrFromString(target) + if err != nil { + return err + } + + flags := winapi.BINDFLT_FLAG_USE_CURRENT_SILO_MAPPING + if readOnly { + flags |= winapi.BINDFLT_FLAG_READ_ONLY_MAPPING + } + + if err := winapi.BfSetupFilter( + job.handle, + flags, + rootPtr, + targetPtr, + nil, + 0, + ); err != nil { + return fmt.Errorf("failed to bind target %q to root %q for job object: %w", target, root, err) + } + return nil +} + +// isJobSilo is a helper to determine if a job object that was opened is a silo. This should ONLY be called +// from `Open` and any callers in this package afterwards should use `job.isSilo()` +func isJobSilo(h windows.Handle) bool { + // None of the information from the structure that this info class expects will be used, this is just used as + // the call will fail if the job hasn't been upgraded to a silo so we can use this to tell when we open a job + // if it's a silo or not. Because none of the info matters simply define a dummy struct with the size that the call + // expects which is 16 bytes. + type isSiloObj struct { + _ [16]byte + } + var siloInfo isSiloObj + err := winapi.QueryInformationJobObject( + h, + winapi.JobObjectSiloBasicInformation, + unsafe.Pointer(&siloInfo), + uint32(unsafe.Sizeof(siloInfo)), + nil, + ) + return err == nil +} + +// PromoteToSilo promotes a job object to a silo. There must be no running processess +// in the job for this to succeed. If the job is already a silo this is a no-op. +func (job *JobObject) PromoteToSilo() error { + job.handleLock.RLock() + defer job.handleLock.RUnlock() + + if job.handle == 0 { + return ErrAlreadyClosed + } + + if job.isSilo() { + return nil + } + + pids, err := job.Pids() + if err != nil { + return err + } + + if len(pids) != 0 { + return fmt.Errorf("job cannot have running processes to be promoted to a silo, found %d running processes", len(pids)) + } + + _, err = windows.SetInformationJobObject( + job.handle, + winapi.JobObjectCreateSilo, + 0, + 0, + ) + if err != nil { + return fmt.Errorf("failed to promote job to silo: %w", err) + } + + atomic.StoreUint32(&job.silo, 1) + return nil +} + +// isSilo returns if the job object is a silo. +func (job *JobObject) isSilo() bool { + return atomic.LoadUint32(&job.silo) == 1 +} + // QueryPrivateWorkingSet returns the private working set size for the job. This is calculated by adding up the // private working set for every process running in the job. 
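Taken together, the silo additions above (Options.Silo, PromoteToSilo, isSilo and ApplyFileBinding) enable the usage pattern sketched below. Signatures are taken from this hunk; the package is internal to hcsshim, so this only builds from within that module, and the paths are placeholders:

    //go:build windows

    package main

    import (
    	"context"
    	"log"

    	"github.com/Microsoft/hcsshim/internal/jobobject"
    )

    func main() {
    	ctx := context.Background()

    	// Create a job object and promote it to a silo in one step. Create also
    	// applies JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE, which the promotion requires.
    	job, err := jobobject.Create(ctx, &jobobject.Options{Silo: true})
    	if err != nil {
    		log.Fatal(err)
    	}

    	// Bind C:\data (visible only to processes in this silo) to the backing
    	// path C:\host\data, read-only. On a plain, non-silo job this returns ErrNotSilo.
    	if err := job.ApplyFileBinding(`C:\data`, `C:\host\data`, true); err != nil {
    		log.Fatal(err)
    	}
    }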
func (job *JobObject) QueryPrivateWorkingSet() (uint64, error) { diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go index 4efde292c49d5..03f71d9a425f0 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go @@ -1,3 +1,5 @@ +//go:build windows + package jobobject import ( diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/context.go b/vendor/github.com/Microsoft/hcsshim/internal/log/context.go new file mode 100644 index 0000000000000..d17d909d93c3d --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/log/context.go @@ -0,0 +1,118 @@ +package log + +import ( + "context" + + "github.com/sirupsen/logrus" + "go.opencensus.io/trace" +) + +type entryContextKeyType int + +const _entryContextKey entryContextKeyType = iota + +var ( + // L is the default, blank logging entry. WithField and co. all return a copy + // of the original entry, so this will not leak fields between calls. + // + // Do NOT modify fields directly, as that will corrupt state for all users and + // is not thread safe. + // Instead, use `L.With*` or `L.Dup()`. Or `G(context.Background())`. + L = logrus.NewEntry(logrus.StandardLogger()) + + // G is an alias for GetEntry + G = GetEntry + + // S is an alias for SetEntry + S = SetEntry + + // U is an alias for UpdateContext + U = UpdateContext +) + +// GetEntry returns a `logrus.Entry` stored in the context, if one exists. +// Otherwise, it returns a default entry that points to the current context. +// +// Note: if the a new entry is returned, it will reference the passed in context. +// However, existing contexts may be stored in parent contexts and additionally reference +// earlier contexts. +// Use `UpdateContext` to update the entry and context. +func GetEntry(ctx context.Context) *logrus.Entry { + entry := fromContext(ctx) + + if entry == nil { + entry = L.WithContext(ctx) + } + + return entry +} + +// SetEntry updates the log entry in the context with the provided fields, and +// returns both. It is equivalent to: +// +// entry := GetEntry(ctx).WithFields(fields) +// ctx = WithContext(ctx, entry) +// +// See WithContext for more information. +func SetEntry(ctx context.Context, fields logrus.Fields) (context.Context, *logrus.Entry) { + e := GetEntry(ctx) + if len(fields) > 0 { + e = e.WithFields(fields) + } + return WithContext(ctx, e) +} + +// UpdateContext extracts the log entry from the context, and, if the entry's +// context points to a parent's of the current context, ands the entry +// to the most recent context. It is equivalent to: +// +// entry := GetEntry(ctx) +// ctx = WithContext(ctx, entry) +// +// This allows the entry to reference the most recent context and any new +// values (such as span contexts) added to it. +// +// See WithContext for more information. +func UpdateContext(ctx context.Context) context.Context { + // there is no way to check its ctx (and not one of its parents) that contains `e` + // so, at a slight cost, force add `e` to the context + ctx, _ = WithContext(ctx, GetEntry(ctx)) + return ctx +} + +// WithContext returns a context that contains the provided log entry. 
+// The entry can be extracted with `GetEntry` (`G`) +// +// The entry in the context is a copy of `entry` (generated by `entry.WithContext`) +func WithContext(ctx context.Context, entry *logrus.Entry) (context.Context, *logrus.Entry) { + // regardless of the order, entry.Context != GetEntry(ctx) + // here, the returned entry will reference the supplied context + entry = entry.WithContext(ctx) + ctx = context.WithValue(ctx, _entryContextKey, entry) + + return ctx, entry +} + +// Copy extracts the tracing Span and logging entry from the src Context, if they +// exist, and adds them to the dst Context. +// +// This is useful to share tracing and logging between contexts, but not the +// cancellation. For example, if the src Context has been cancelled but cleanup +// operations triggered by the cancellation require a non-cancelled context to +// execute. +func Copy(dst context.Context, src context.Context) context.Context { + if s := trace.FromContext(src); s != nil { + dst = trace.NewContext(dst, s) + } + + if e := fromContext(src); e != nil { + dst, _ = WithContext(dst, e) + } + + return dst +} + +func fromContext(ctx context.Context) *logrus.Entry { + e, _ := ctx.Value(_entryContextKey).(*logrus.Entry) + return e +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/g.go b/vendor/github.com/Microsoft/hcsshim/internal/log/g.go deleted file mode 100644 index ba6b1a4a53a7a..0000000000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/log/g.go +++ /dev/null @@ -1,23 +0,0 @@ -package log - -import ( - "context" - - "github.com/sirupsen/logrus" - "go.opencensus.io/trace" -) - -// G returns a `logrus.Entry` with the `TraceID, SpanID` from `ctx` if `ctx` -// contains an OpenCensus `trace.Span`. -func G(ctx context.Context) *logrus.Entry { - span := trace.FromContext(ctx) - if span != nil { - sctx := span.SpanContext() - return logrus.WithFields(logrus.Fields{ - "traceID": sctx.TraceID.String(), - "spanID": sctx.SpanID.String(), - // "parentSpanID": TODO: JTERRY75 - Try to convince OC to export this? - }) - } - return logrus.NewEntry(logrus.StandardLogger()) -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/hook.go b/vendor/github.com/Microsoft/hcsshim/internal/log/hook.go new file mode 100644 index 0000000000000..8f89405923d22 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/log/hook.go @@ -0,0 +1,45 @@ +package log + +import ( + "github.com/Microsoft/hcsshim/internal/logfields" + "github.com/sirupsen/logrus" + "go.opencensus.io/trace" +) + +// Hook serves to intercept and format `logrus.Entry`s before they are passed +// to the ETW hook. +// +// The containerd shim discards the (formatted) logrus output, and outputs only via ETW. +// The Linux GCS outputs logrus entries over stdout, which is consumed by the shim and +// then re-output via the ETW hook. 
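The context-scoped logging helpers above (G/S/WithContext/UpdateContext) and the new Hook are meant to be used roughly as follows. A minimal sketch assuming only the functions shown in this diff; internal/log is hcsshim-internal, so this is illustrative rather than importable from outside:

    package main

    import (
    	"context"

    	"github.com/Microsoft/hcsshim/internal/log"
    	"github.com/sirupsen/logrus"
    )

    func main() {
    	// Register the hook so trace/span IDs are attached to entries that carry a context.
    	logrus.AddHook(log.NewHook())

    	// Attach fields to the context once...
    	ctx, _ := log.S(context.Background(), logrus.Fields{"cid": "container-1"})

    	// ...and every call site that has the context gets them back via G.
    	doWork(ctx)
    }

    func doWork(ctx context.Context) {
    	log.G(ctx).Info("doing work") // logs with cid=container-1
    }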
+type Hook struct{} + +var _ logrus.Hook = &Hook{} + +func NewHook() *Hook { + return &Hook{} +} + +func (h *Hook) Levels() []logrus.Level { + return logrus.AllLevels +} + +func (h *Hook) Fire(e *logrus.Entry) (err error) { + h.addSpanContext(e) + + return nil +} + +func (h *Hook) addSpanContext(e *logrus.Entry) { + ctx := e.Context + if ctx == nil { + return + } + span := trace.FromContext(ctx) + if span == nil { + return + } + sctx := span.SpanContext() + e.Data[logfields.TraceID] = sctx.TraceID.String() + e.Data[logfields.SpanID] = sctx.SpanID.String() +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go b/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go new file mode 100644 index 0000000000000..d51e0fd89faeb --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go @@ -0,0 +1,194 @@ +package log + +import ( + "bytes" + "encoding/json" + "errors" + "strings" + "sync/atomic" + + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" +) + +// This package scrubs objects of potentially sensitive information to pass to logging + +type genMap = map[string]interface{} +type scrubberFunc func(genMap) error + +const _scrubbedReplacement = "" + +var ( + ErrUnknownType = errors.New("encoded object is of unknown type") + + // case sensitive keywords, so "env" is not a substring on "Environment" + _scrubKeywords = [][]byte{[]byte("env"), []byte("Environment")} + + _scrub int32 +) + +// SetScrubbing enables scrubbing +func SetScrubbing(enable bool) { + v := int32(0) // cant convert from bool to int32 directly + if enable { + v = 1 + } + atomic.StoreInt32(&_scrub, v) +} + +// IsScrubbingEnabled checks if scrubbing is enabled +func IsScrubbingEnabled() bool { + v := atomic.LoadInt32(&_scrub) + return v != 0 +} + +// ScrubProcessParameters scrubs HCS Create Process requests with config parameters of +// type internal/hcs/schema2.ScrubProcessParameters (aka hcsshema.ScrubProcessParameters) +func ScrubProcessParameters(s string) (string, error) { + // todo: deal with v1 ProcessConfig + b := []byte(s) + if !IsScrubbingEnabled() || !hasKeywords(b) || !json.Valid(b) { + return s, nil + } + + pp := hcsschema.ProcessParameters{} + if err := json.Unmarshal(b, &pp); err != nil { + return "", err + } + pp.Environment = map[string]string{_scrubbedReplacement: _scrubbedReplacement} + + buf := bytes.NewBuffer(b[:0]) + if err := encode(buf, pp); err != nil { + return "", err + } + return strings.TrimSpace(buf.String()), nil +} + +// ScrubBridgeCreate scrubs requests sent over the bridge of type +// internal/gcs/protocol.containerCreate wrapping an internal/hcsoci.linuxHostedSystem +func ScrubBridgeCreate(b []byte) ([]byte, error) { + return scrubBytes(b, scrubBridgeCreate) +} + +func scrubBridgeCreate(m genMap) error { + if !isRequestBase(m) { + return ErrUnknownType + } + if ss, ok := m["ContainerConfig"]; ok { + // ContainerConfig is a json encoded struct passed as a regular string field + s, ok := ss.(string) + if !ok { + return ErrUnknownType + } + b, err := scrubBytes([]byte(s), scrubLinuxHostedSystem) + if err != nil { + return err + } + m["ContainerConfig"] = string(b) + return nil + } + return ErrUnknownType +} + +func scrubLinuxHostedSystem(m genMap) error { + if m, ok := index(m, "OciSpecification"); ok { + if _, ok := m["annotations"]; ok { + m["annotations"] = map[string]string{_scrubbedReplacement: _scrubbedReplacement} + } + if m, ok := index(m, "process"); ok { + if _, ok := m["env"]; ok { + m["env"] = []string{_scrubbedReplacement} + return nil 
+ } + } + } + return ErrUnknownType +} + +// ScrubBridgeExecProcess scrubs requests sent over the bridge of type +// internal/gcs/protocol.containerExecuteProcess +func ScrubBridgeExecProcess(b []byte) ([]byte, error) { + return scrubBytes(b, scrubExecuteProcess) +} + +func scrubExecuteProcess(m genMap) error { + if !isRequestBase(m) { + return ErrUnknownType + } + if m, ok := index(m, "Settings"); ok { + if ss, ok := m["ProcessParameters"]; ok { + // ProcessParameters is a json encoded struct passed as a regular sting field + s, ok := ss.(string) + if !ok { + return ErrUnknownType + } + + s, err := ScrubProcessParameters(s) + if err != nil { + return err + } + + m["ProcessParameters"] = s + return nil + } + } + return ErrUnknownType +} + +func scrubBytes(b []byte, scrub scrubberFunc) ([]byte, error) { + if !IsScrubbingEnabled() || !hasKeywords(b) || !json.Valid(b) { + return b, nil + } + + m := make(genMap) + if err := json.Unmarshal(b, &m); err != nil { + return nil, err + } + + // could use regexp, but if the env strings contain braces, the regexp fails + // parsing into individual structs would require access to private structs + if err := scrub(m); err != nil { + return nil, err + } + + buf := &bytes.Buffer{} + if err := encode(buf, m); err != nil { + return nil, err + } + + return bytes.TrimSpace(buf.Bytes()), nil +} + +func encode(buf *bytes.Buffer, v interface{}) error { + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + if err := enc.Encode(v); err != nil { + return err + } + return nil +} + +func isRequestBase(m genMap) bool { + // neither of these are (currently) `omitempty` + _, a := m["ActivityId"] + _, c := m["ContainerId"] + return a && c +} + +// combination `m, ok := m[s]` and `m, ok := m.(genMap)` +func index(m genMap, s string) (genMap, bool) { + if m, ok := m[s]; ok { + mm, ok := m.(genMap) + return mm, ok + } + + return m, false +} + +func hasKeywords(b []byte) bool { + for _, bb := range _scrubKeywords { + if bytes.Contains(b, bb) { + return true + } + } + return false +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go b/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go index cf2c166d9b832..3e175e5222436 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go @@ -3,21 +3,44 @@ package logfields const ( // Identifiers + Name = "name" + Namespace = "namespace" + Operation = "operation" + + ID = "id" + SandboxID = "sid" ContainerID = "cid" - UVMID = "uvm-id" + ExecID = "eid" ProcessID = "pid" + TaskID = "tid" + UVMID = "uvm-id" + + // networking and IO + + File = "file" + Path = "path" + Bytes = "bytes" + Pipe = "pipe" // Common Misc - // Timeout represents an operation timeout. 
- Timeout = "timeout" + Attempt = "attemptNo" JSON = "json" + // Time + + StartTime = "startTime" + EndTime = "endTime" + Duration = "duration" + Timeout = "timeout" + // Keys/values Field = "field" + Key = "key" OCIAnnotation = "oci-annotation" Value = "value" + Options = "options" // Golang type's @@ -29,4 +52,10 @@ const ( // runhcs VMShimOperation = "vmshim-op" + + // logging and tracing + + TraceID = "traceID" + SpanID = "spanID" + ParentSpanID = "parentSpanID" ) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go b/vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go new file mode 100644 index 0000000000000..1ef5814d7ef4a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go @@ -0,0 +1,316 @@ +package memory + +import ( + "github.com/pkg/errors" +) + +const ( + minimumClassSize = MiB + maximumClassSize = 4 * GiB + memoryClassNumber = 7 +) + +var ( + ErrInvalidMemoryClass = errors.New("invalid memory class") + ErrEarlyMerge = errors.New("not all children have been freed") + ErrEmptyPoolOperation = errors.New("operation on empty pool") +) + +// GetMemoryClassType returns the minimum memory class type that can hold a device of +// a given size. The smallest class is 1MB and the largest one is 4GB with 2 bit offset +// intervals in between, for a total of 7 different classes. This function does not +// do a validity check +func GetMemoryClassType(s uint64) classType { + s = (s - 1) >> 20 + memCls := uint32(0) + for s > 0 { + s = s >> 2 + memCls++ + } + return classType(memCls) +} + +// GetMemoryClassSize returns size in bytes for a given memory class +func GetMemoryClassSize(memCls classType) (uint64, error) { + if memCls >= memoryClassNumber { + return 0, ErrInvalidMemoryClass + } + return minimumClassSize << (2 * memCls), nil +} + +// region represents a contiguous memory block +type region struct { + // parent region that has been split into 4 + parent *region + class classType + // offset represents offset in bytes + offset uint64 +} + +// memoryPool tracks free and busy (used) memory regions +type memoryPool struct { + free map[uint64]*region + busy map[uint64]*region +} + +// PoolAllocator implements a memory allocation strategy similar to buddy-malloc https://github.com/evanw/buddy-malloc/blob/master/buddy-malloc.c +// We borrow the idea of spanning a tree of fixed size regions on top of a contiguous memory +// space. +// +// There are a total of 7 different region sizes that can be allocated, with the smallest +// being 1MB and the largest 4GB (the default maximum size of a Virtual PMem device). +// +// For efficiency and to reduce fragmentation an entire region is allocated when requested. +// When there's no available region of requested size, we try to allocate more memory for +// this particular size by splitting the next available larger region into smaller ones, e.g. +// if there's no region available for size class 0, we try splitting a region from class 1, +// then class 2 etc, until we are able to do so or hit the upper limit. 
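The buddy-style PoolAllocator described in the comment above is driven through NewPoolMemoryAllocator, Allocate and Release, which follow in the remainder of this file. A minimal sketch under that assumption (hcsshim-internal package, illustrative only):

    package main

    import (
    	"fmt"

    	"github.com/Microsoft/hcsshim/internal/memory"
    )

    func main() {
    	// Starts with a single free 4GiB region (the largest class).
    	alloc := memory.NewPoolMemoryAllocator()

    	// Ask for 100MiB; the allocator rounds up to the smallest class that
    	// fits (256MiB here) and splits larger regions as needed.
    	reg, err := alloc.Allocate(100 * memory.MiB)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("offset=%d size=%d\n", reg.Offset(), reg.Size())

    	// Release returns the region to the free pool; sibling regions are
    	// merged back into their parent once all four are free.
    	if err := alloc.Release(reg); err != nil {
    		panic(err)
    	}
    }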
+type PoolAllocator struct { + pools [memoryClassNumber]*memoryPool +} + +var _ MappedRegion = ®ion{} +var _ Allocator = &PoolAllocator{} + +func (r *region) Offset() uint64 { + return r.offset +} + +func (r *region) Size() uint64 { + sz, err := GetMemoryClassSize(r.class) + if err != nil { + panic(err) + } + return sz +} + +func (r *region) Type() classType { + return r.class +} + +func newEmptyMemoryPool() *memoryPool { + return &memoryPool{ + free: make(map[uint64]*region), + busy: make(map[uint64]*region), + } +} + +func NewPoolMemoryAllocator() PoolAllocator { + pa := PoolAllocator{} + p := newEmptyMemoryPool() + // by default we allocate a single region with maximum possible size (class type) + p.free[0] = ®ion{ + class: memoryClassNumber - 1, + offset: 0, + } + pa.pools[memoryClassNumber-1] = p + return pa +} + +// Allocate checks memory region pool for the given `size` and returns a free region with +// minimal offset, if none available tries expanding matched memory pool. +// +// Internally it's done via moving a region from free pool into a busy pool +func (pa *PoolAllocator) Allocate(size uint64) (MappedRegion, error) { + memCls := GetMemoryClassType(size) + if memCls >= memoryClassNumber { + return nil, ErrInvalidMemoryClass + } + + // find region with the smallest offset + nextCls, nextOffset, err := pa.findNextOffset(memCls) + if err != nil { + return nil, err + } + + // this means that there are no more regions for the current class, try expanding + if nextCls != memCls { + if err := pa.split(memCls); err != nil { + if err == ErrInvalidMemoryClass { + return nil, ErrNotEnoughSpace + } + return nil, err + } + } + + if err := pa.markBusy(memCls, nextOffset); err != nil { + return nil, err + } + + // by this point memory pool for memCls should have been created, + // either prior or during split call + if r := pa.pools[memCls].busy[nextOffset]; r != nil { + return r, nil + } + + return nil, ErrNotEnoughSpace +} + +// Release marks a memory region of class `memCls` and offset `offset` as free and tries to merge smaller regions into +// a bigger one +func (pa *PoolAllocator) Release(reg MappedRegion) error { + mp := pa.pools[reg.Type()] + if mp == nil { + return ErrEmptyPoolOperation + } + + err := pa.markFree(reg.Type(), reg.Offset()) + if err != nil { + return err + } + + n := mp.free[reg.Offset()] + if n == nil { + return ErrNotAllocated + } + if err := pa.merge(n.parent); err != nil { + if err != ErrEarlyMerge { + return err + } + } + return nil +} + +// findNextOffset finds next region location for a given memCls +func (pa *PoolAllocator) findNextOffset(memCls classType) (classType, uint64, error) { + for mc := memCls; mc < memoryClassNumber; mc++ { + pi := pa.pools[mc] + if pi == nil || len(pi.free) == 0 { + continue + } + + target := uint64(maximumClassSize) + for offset := range pi.free { + if offset < target { + target = offset + } + } + return mc, target, nil + } + return 0, 0, ErrNotEnoughSpace +} + +// split tries to recursively split a bigger memory region into smaller ones until it succeeds or hits the upper limit +func (pa *PoolAllocator) split(clsType classType) error { + nextClsType := clsType + 1 + if nextClsType >= memoryClassNumber { + return ErrInvalidMemoryClass + } + + nextPool := pa.pools[nextClsType] + if nextPool == nil { + nextPool = newEmptyMemoryPool() + pa.pools[nextClsType] = nextPool + } + + cls, offset, err := pa.findNextOffset(nextClsType) + if err != nil { + return err + } + // not enough memory in the next class, try to recursively expand + if 
cls != nextClsType { + if err := pa.split(nextClsType); err != nil { + return err + } + } + + if err := pa.markBusy(nextClsType, offset); err != nil { + return err + } + + // memCls validity has been checked already, we can ignore the error + clsSize, _ := GetMemoryClassSize(clsType) + + nextReg := nextPool.busy[offset] + if nextReg == nil { + return ErrNotAllocated + } + + // expand memCls + cp := pa.pools[clsType] + if cp == nil { + cp = newEmptyMemoryPool() + pa.pools[clsType] = cp + } + // create 4 smaller regions + for i := uint64(0); i < 4; i++ { + offset := nextReg.offset + i*clsSize + reg := ®ion{ + parent: nextReg, + class: clsType, + offset: offset, + } + cp.free[offset] = reg + } + return nil +} + +func (pa *PoolAllocator) merge(parent *region) error { + // nothing to merge + if parent == nil { + return nil + } + + childCls := parent.class - 1 + childPool := pa.pools[childCls] + // no child nodes to merge, try to merge parent + if childPool == nil { + return pa.merge(parent.parent) + } + + childSize, err := GetMemoryClassSize(childCls) + if err != nil { + return err + } + + // check if all the child nodes are free + var children []*region + for i := uint64(0); i < 4; i++ { + child, free := childPool.free[parent.offset+i*childSize] + if !free { + return ErrEarlyMerge + } + children = append(children, child) + } + + // at this point all the child nodes will be free and we can merge + for _, child := range children { + delete(childPool.free, child.offset) + } + + if err := pa.markFree(parent.class, parent.offset); err != nil { + return err + } + + return pa.merge(parent.parent) +} + +// markFree internally moves a region with `offset` from busy to free map +func (pa *PoolAllocator) markFree(memCls classType, offset uint64) error { + clsPool := pa.pools[memCls] + if clsPool == nil { + return ErrEmptyPoolOperation + } + + if reg, exists := clsPool.busy[offset]; exists { + clsPool.free[offset] = reg + delete(clsPool.busy, offset) + return nil + } + return ErrNotAllocated +} + +// markBusy internally moves a region with `offset` from free to busy map +func (pa *PoolAllocator) markBusy(memCls classType, offset uint64) error { + clsPool := pa.pools[memCls] + if clsPool == nil { + return ErrEmptyPoolOperation + } + + if reg, exists := clsPool.free[offset]; exists { + clsPool.busy[offset] = reg + delete(clsPool.free, offset) + return nil + } + return ErrNotAllocated +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/memory/types.go b/vendor/github.com/Microsoft/hcsshim/internal/memory/types.go new file mode 100644 index 0000000000000..d6cdb8cc4cc78 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/memory/types.go @@ -0,0 +1,28 @@ +package memory + +import "github.com/pkg/errors" + +type classType uint32 + +const ( + MiB = 1024 * 1024 + GiB = 1024 * MiB +) + +var ( + ErrNotEnoughSpace = errors.New("not enough space") + ErrNotAllocated = errors.New("no memory allocated at the given offset") +) + +// MappedRegion represents a memory block with an offset +type MappedRegion interface { + Offset() uint64 + Size() uint64 + Type() classType +} + +// Allocator is an interface for memory allocation +type Allocator interface { + Allocate(uint64) (MappedRegion, error) + Release(MappedRegion) error +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go b/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go index fee4765cbc49d..0e2b7e9bf6279 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go +++ 
b/vendor/github.com/Microsoft/hcsshim/internal/oc/span.go @@ -1,9 +1,14 @@ package oc import ( + "context" + + "github.com/Microsoft/hcsshim/internal/log" "go.opencensus.io/trace" ) +var DefaultSampler = trace.AlwaysSample() + // SetSpanStatus sets `span.SetStatus` to the proper status depending on `err`. If // `err` is `nil` assumes `trace.StatusCodeOk`. func SetSpanStatus(span *trace.Span, err error) { @@ -15,3 +20,29 @@ func SetSpanStatus(span *trace.Span, err error) { } span.SetStatus(status) } + +// StartSpan wraps "go.opencensus.io/trace".StartSpan, but, if the span is sampling, +// adds a log entry to the context that points to the newly created span. +func StartSpan(ctx context.Context, name string, o ...trace.StartOption) (context.Context, *trace.Span) { + ctx, s := trace.StartSpan(ctx, name, o...) + return update(ctx, s) +} + +// StartSpanWithRemoteParent wraps "go.opencensus.io/trace".StartSpanWithRemoteParent. +// +// See StartSpan for more information. +func StartSpanWithRemoteParent(ctx context.Context, name string, parent trace.SpanContext, o ...trace.StartOption) (context.Context, *trace.Span) { + ctx, s := trace.StartSpanWithRemoteParent(ctx, name, parent, o...) + return update(ctx, s) +} + +func update(ctx context.Context, s *trace.Span) (context.Context, *trace.Span) { + if s.IsRecordingEvents() { + ctx = log.UpdateContext(ctx) + } + + return ctx, s +} + +var WithServerSpanKind = trace.WithSpanKind(trace.SpanKindServer) +var WithClientSpanKind = trace.WithSpanKind(trace.SpanKindClient) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/protocol/guestrequest/types.go b/vendor/github.com/Microsoft/hcsshim/internal/protocol/guestrequest/types.go new file mode 100644 index 0000000000000..d8d0c20b1036e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/protocol/guestrequest/types.go @@ -0,0 +1,56 @@ +package guestrequest + +// These are constants for v2 schema modify requests. + +type RequestType string +type ResourceType string + +// RequestType const +const ( + RequestTypeAdd RequestType = "Add" + RequestTypeRemove RequestType = "Remove" + RequestTypePreAdd RequestType = "PreAdd" // For networking + RequestTypeUpdate RequestType = "Update" +) + +type SignalValueWCOW string + +const ( + SignalValueWCOWCtrlC SignalValueWCOW = "CtrlC" + SignalValueWCOWCtrlBreak SignalValueWCOW = "CtrlBreak" + SignalValueWCOWCtrlClose SignalValueWCOW = "CtrlClose" + SignalValueWCOWCtrlLogOff SignalValueWCOW = "CtrlLogOff" + SignalValueWCOWCtrlShutdown SignalValueWCOW = "CtrlShutdown" +) + +// ModificationRequest is for modify commands passed to the guest. +type ModificationRequest struct { + RequestType RequestType `json:"RequestType,omitempty"` + ResourceType ResourceType `json:"ResourceType,omitempty"` + Settings interface{} `json:"Settings,omitempty"` +} + +type NetworkModifyRequest struct { + AdapterId string `json:"AdapterId,omitempty"` //nolint:stylecheck + RequestType RequestType `json:"RequestType,omitempty"` + Settings interface{} `json:"Settings,omitempty"` +} + +type RS4NetworkModifyRequest struct { + AdapterInstanceId string `json:"AdapterInstanceId,omitempty"` //nolint:stylecheck + RequestType RequestType `json:"RequestType,omitempty"` + Settings interface{} `json:"Settings,omitempty"` +} + +var ( + // V5 GUIDs for SCSI controllers + // These GUIDs are created with namespace GUID "d422512d-2bf2-4752-809d-7b82b5fcb1b4" + // and index as names. 
For example, first GUID is created like this: + // guid.NewV5("d422512d-2bf2-4752-809d-7b82b5fcb1b4", []byte("0")) + ScsiControllerGuids = []string{ + "df6d0690-79e5-55b6-a5ec-c1e2f77f580a", + "0110f83b-de10-5172-a266-78bca56bf50a", + "b5d2d8d4-3a75-51bf-945b-3444dc6b8579", + "305891a9-b251-5dfe-91a2-c25d9212275b", + } +) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/regstate/doc.go b/vendor/github.com/Microsoft/hcsshim/internal/regstate/doc.go new file mode 100644 index 0000000000000..51bcdf6e98369 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/regstate/doc.go @@ -0,0 +1 @@ +package regstate diff --git a/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go b/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go index dcbc9334d7698..a56be7b26573f 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go @@ -1,3 +1,5 @@ +//go:build windows + package regstate import ( @@ -13,7 +15,7 @@ import ( "golang.org/x/sys/windows/registry" ) -//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go regstate.go +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go regstate.go //sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW diff --git a/vendor/github.com/Microsoft/hcsshim/internal/regstate/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/regstate/zsyscall_windows.go index 4e349ad498497..4ff1b333a50e6 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/regstate/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/regstate/zsyscall_windows.go @@ -1,4 +1,6 @@ -// Code generated by 'go generate'; DO NOT EDIT. +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. package regstate @@ -19,6 +21,7 @@ const ( var ( errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL ) // errnoErr returns common boxed Errno values, to prevent @@ -26,7 +29,7 @@ var ( func errnoErr(e syscall.Errno) error { switch e { case 0: - return nil + return errERROR_EINVAL case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go b/vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go index a161c204e2faa..132b28d39c117 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go @@ -1,3 +1,5 @@ +//go:build windows + package runhcs import ( @@ -5,7 +7,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "syscall" "time" @@ -40,7 +41,7 @@ type ContainerState struct { // returned success or error. If error converts that to an error and returns. If // `p` is not nill will issue a `Kill` and `Wait` for exit. 
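The guestrequest types above describe how v2 modification requests are shaped on the wire. A short sketch of building and marshalling one; the ResourceType string and the settings payload below are placeholders, not values defined in this diff:

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"github.com/Microsoft/hcsshim/internal/protocol/guestrequest"
    )

    func main() {
    	req := guestrequest.ModificationRequest{
    		RequestType:  guestrequest.RequestTypeAdd,
    		ResourceType: guestrequest.ResourceType("MappedDirectory"), // placeholder resource name
    		Settings: map[string]string{ // placeholder settings payload
    			"HostPath":      `C:\host\dir`,
    			"ContainerPath": `C:\dir`,
    		},
    	}

    	b, err := json.Marshal(req)
    	if err != nil {
    		panic(err)
    	}
    	// Prints something like:
    	// {"RequestType":"Add","ResourceType":"MappedDirectory","Settings":{...}}
    	fmt.Println(string(b))
    }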
func GetErrorFromPipe(pipe io.Reader, p *os.Process) error { - serr, err := ioutil.ReadAll(pipe) + serr, err := io.ReadAll(pipe) if err != nil { return err } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go b/vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go index 2c8957b88df76..b3e443d6004d2 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go @@ -1,3 +1,5 @@ +//go:build windows + package runhcs import ( diff --git a/vendor/github.com/Microsoft/hcsshim/internal/safefile/do.go b/vendor/github.com/Microsoft/hcsshim/internal/safefile/do.go new file mode 100644 index 0000000000000..f211d25e72e96 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/safefile/do.go @@ -0,0 +1 @@ +package safefile diff --git a/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go index 66b8d7e03537f..74967f21af717 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go @@ -1,3 +1,5 @@ +//go:build windows + package safefile import ( @@ -156,7 +158,6 @@ func LinkRelative(oldname string, oldroot *os.File, newname string, newroot *os. if (fi.FileAttributes & syscall.FILE_ATTRIBUTE_REPARSE_POINT) != 0 { return &os.LinkError{Op: "link", Old: oldf.Name(), New: filepath.Join(newroot.Name(), newname), Err: winapi.RtlNtStatusToDosError(winapi.STATUS_REPARSE_POINT_ENCOUNTERED)} } - } else { parent = newroot } @@ -339,6 +340,33 @@ func MkdirRelative(path string, root *os.File) error { return err } +// MkdirAllRelative creates each directory in the path relative to a root, failing if +// any existing intermediate path components are reparse points. +func MkdirAllRelative(path string, root *os.File) error { + pathParts := strings.Split(filepath.Clean(path), (string)(filepath.Separator)) + for index := range pathParts { + partialPath := filepath.Join(pathParts[0 : index+1]...) + stat, err := LstatRelative(partialPath, root) + + if err != nil { + if os.IsNotExist(err) { + if err := MkdirRelative(partialPath, root); err != nil { + return err + } + continue + } + return err + } + + if !stat.IsDir() { + fullPath := filepath.Join(root.Name(), partialPath) + return &os.PathError{Op: "mkdir", Path: fullPath, Err: syscall.ENOTDIR} + } + } + + return nil +} + // LstatRelative performs a stat operation on a file relative to a root, failing // if any intermediate path components are reparse points. 
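MkdirAllRelative above is the reparse-point-safe counterpart of os.MkdirAll. A usage sketch; the root handle would normally come from this package's OpenRoot helper, which is not part of this hunk, so treat that call as an assumption:

    //go:build windows

    package main

    import (
    	"log"

    	"github.com/Microsoft/hcsshim/internal/safefile"
    )

    func main() {
    	// OpenRoot is assumed from the wider safefile package (not shown in this diff).
    	root, err := safefile.OpenRoot(`C:\layer`)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer root.Close()

    	// Creates Files\Windows\Temp under the root, failing if any existing
    	// intermediate component is a reparse point.
    	if err := safefile.MkdirAllRelative(`Files\Windows\Temp`, root); err != nil {
    		log.Fatal(err)
    	}
    }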
func LstatRelative(path string, root *os.File) (os.FileInfo, error) { diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go b/vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go similarity index 67% rename from vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go rename to vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go index 602920786c90e..bfcc157699a22 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/security/grantvmgroupaccess.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/security/grantvmgroupaccess.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package security @@ -19,25 +20,37 @@ type ( securityInformation uint32 trusteeForm uint32 trusteeType uint32 +) - explicitAccess struct { - accessPermissions accessMask - accessMode accessMode - inheritance inheritMode - trustee trustee - } +type explicitAccess struct { + //nolint:structcheck + accessPermissions accessMask + //nolint:structcheck + accessMode accessMode + //nolint:structcheck + inheritance inheritMode + //nolint:structcheck + trustee trustee +} - trustee struct { - multipleTrustee *trustee - multipleTrusteeOperation int32 - trusteeForm trusteeForm - trusteeType trusteeType - name uintptr - } -) +type trustee struct { + //nolint:unused,structcheck + multipleTrustee *trustee + //nolint:unused,structcheck + multipleTrusteeOperation int32 + trusteeForm trusteeForm + trusteeType trusteeType + name uintptr +} const ( - accessMaskDesiredPermission accessMask = 1 << 31 // GENERIC_READ + AccessMaskNone accessMask = 0 + AccessMaskRead accessMask = 1 << 31 // GENERIC_READ + AccessMaskWrite accessMask = 1 << 30 // GENERIC_WRITE + AccessMaskExecute accessMask = 1 << 29 // GENERIC_EXECUTE + AccessMaskAll accessMask = 1 << 28 // GENERIC_ALL + + accessMaskDesiredPermission = AccessMaskRead accessModeGrant accessMode = 1 @@ -56,6 +69,7 @@ const ( shareModeRead shareMode = 0x1 shareModeWrite shareMode = 0x2 + //nolint:stylecheck // ST1003 sidVmGroup = "S-1-5-83-0" trusteeFormIsSid trusteeForm = 0 @@ -63,11 +77,20 @@ const ( trusteeTypeWellKnownGroup trusteeType = 5 ) -// GrantVMGroupAccess sets the DACL for a specified file or directory to +// GrantVmGroupAccess sets the DACL for a specified file or directory to // include Grant ACE entries for the VM Group SID. This is a golang re- // implementation of the same function in vmcompute, just not exported in // RS5. Which kind of sucks. Sucks a lot :/ -func GrantVmGroupAccess(name string) error { +func GrantVmGroupAccess(name string) error { //nolint:stylecheck // ST1003 + return GrantVmGroupAccessWithMask(name, accessMaskDesiredPermission) +} + +// GrantVmGroupAccessWithMask sets the desired DACL for a specified file or +// directory. +func GrantVmGroupAccessWithMask(name string, access accessMask) error { //nolint:stylecheck // ST1003 + if access == 0 || access<<4 != 0 { + return fmt.Errorf("invalid access mask: 0x%08x", access) + } // Stat (to determine if `name` is a directory). s, err := os.Stat(name) if err != nil { @@ -79,7 +102,9 @@ func GrantVmGroupAccess(name string) error { if err != nil { return err // Already wrapped } - defer syscall.CloseHandle(fd) + defer func() { + _ = syscall.CloseHandle(fd) + }() // Get the current DACL and Security Descriptor. Must defer LocalFree on success. 
ot := objectTypeFileObject @@ -89,15 +114,19 @@ func GrantVmGroupAccess(name string) error { if err := getSecurityInfo(fd, uint32(ot), uint32(si), nil, nil, &origDACL, nil, &sd); err != nil { return fmt.Errorf("%s GetSecurityInfo %s: %w", gvmga, name, err) } - defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd))) + defer func() { + _, _ = syscall.LocalFree((syscall.Handle)(unsafe.Pointer(sd))) + }() // Generate a new DACL which is the current DACL with the required ACEs added. // Must defer LocalFree on success. - newDACL, err := generateDACLWithAcesAdded(name, s.IsDir(), origDACL) + newDACL, err := generateDACLWithAcesAdded(name, s.IsDir(), access, origDACL) if err != nil { return err // Already wrapped } - defer syscall.LocalFree((syscall.Handle)(unsafe.Pointer(newDACL))) + defer func() { + _, _ = syscall.LocalFree((syscall.Handle)(unsafe.Pointer(newDACL))) + }() // And finally use SetSecurityInfo to apply the updated DACL. if err := setSecurityInfo(fd, uint32(ot), uint32(si), uintptr(0), uintptr(0), newDACL, uintptr(0)); err != nil { @@ -110,7 +139,10 @@ func GrantVmGroupAccess(name string) error { // createFile is a helper function to call [Nt]CreateFile to get a handle to // the file or directory. func createFile(name string, isDir bool) (syscall.Handle, error) { - namep := syscall.StringToUTF16(name) + namep, err := syscall.UTF16FromString(name) + if err != nil { + return 0, fmt.Errorf("syscall.UTF16FromString %s: %w", name, err) + } da := uint32(desiredAccessReadControl | desiredAccessWriteDac) sm := uint32(shareModeRead | shareModeWrite) fa := uint32(syscall.FILE_ATTRIBUTE_NORMAL) @@ -126,7 +158,7 @@ func createFile(name string, isDir bool) (syscall.Handle, error) { // generateDACLWithAcesAdded generates a new DACL with the two needed ACEs added. // The caller is responsible for LocalFree of the returned DACL on success. 
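The widened GrantVmGroupAccess API above adds a *WithMask variant so callers can grant more than GENERIC_READ to the VM group SID. A brief sketch using the exported access-mask constants from this hunk (hcsshim-internal package, illustrative only; the path is a placeholder):

    //go:build windows

    package main

    import (
    	"log"

    	"github.com/Microsoft/hcsshim/internal/security"
    )

    func main() {
    	const vhdPath = `C:\layers\sandbox.vhdx` // placeholder path

    	// Previous behaviour: read-only access for the VM group SID (S-1-5-83-0).
    	if err := security.GrantVmGroupAccess(vhdPath); err != nil {
    		log.Fatal(err)
    	}

    	// New: grant read+write, e.g. for a writable scratch disk.
    	if err := security.GrantVmGroupAccessWithMask(vhdPath,
    		security.AccessMaskRead|security.AccessMaskWrite); err != nil {
    		log.Fatal(err)
    	}
    }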
-func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintptr, error) { +func generateDACLWithAcesAdded(name string, isDir bool, desiredAccess accessMask, origDACL uintptr) (uintptr, error) { // Generate pointers to the SIDs based on the string SIDs sid, err := syscall.StringToSid(sidVmGroup) if err != nil { @@ -139,8 +171,8 @@ func generateDACLWithAcesAdded(name string, isDir bool, origDACL uintptr) (uintp } eaArray := []explicitAccess{ - explicitAccess{ - accessPermissions: accessMaskDesiredPermission, + { + accessPermissions: desiredAccess, accessMode: accessModeGrant, inheritance: inheritance, trustee: trustee{ diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/security/syscall_windows.go similarity index 82% rename from vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go rename to vendor/github.com/Microsoft/hcsshim/internal/security/syscall_windows.go index d7096716ce2cf..71326e4e46fec 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/security/syscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/security/syscall_windows.go @@ -1,6 +1,6 @@ package security -//go:generate go run mksyscall_windows.go -output zsyscall_windows.go syscall_windows.go +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go syscall_windows.go //sys getSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, ppsidOwner **uintptr, ppsidGroup **uintptr, ppDacl *uintptr, ppSacl *uintptr, ppSecurityDescriptor *uintptr) (win32err error) = advapi32.GetSecurityInfo //sys setSecurityInfo(handle syscall.Handle, objectType uint32, si uint32, psidOwner uintptr, psidGroup uintptr, pDacl uintptr, pSacl uintptr) (win32err error) = advapi32.SetSecurityInfo diff --git a/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/security/zsyscall_windows.go similarity index 94% rename from vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go rename to vendor/github.com/Microsoft/hcsshim/internal/security/zsyscall_windows.go index 4084680e0f0a0..26c986b88feac 100644 --- a/vendor/github.com/Microsoft/go-winio/pkg/security/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/security/zsyscall_windows.go @@ -1,4 +1,6 @@ -// Code generated by 'go generate'; DO NOT EDIT. +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. 
package security diff --git a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/doc.go b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/doc.go new file mode 100644 index 0000000000000..9dd00c81289a5 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/doc.go @@ -0,0 +1 @@ +package vmcompute diff --git a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go index e7f114b67aaa0..79b14ef972dc4 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go @@ -1,3 +1,5 @@ +//go:build windows + package vmcompute import ( @@ -5,15 +7,17 @@ import ( "syscall" "time" + "github.com/sirupsen/logrus" + "go.opencensus.io/trace" + "github.com/Microsoft/hcsshim/internal/interop" "github.com/Microsoft/hcsshim/internal/log" "github.com/Microsoft/hcsshim/internal/logfields" "github.com/Microsoft/hcsshim/internal/oc" "github.com/Microsoft/hcsshim/internal/timeout" - "go.opencensus.io/trace" ) -//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go vmcompute.go +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go vmcompute.go //sys hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) = vmcompute.HcsEnumerateComputeSystems? //sys hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *HcsSystem, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystem? @@ -62,7 +66,7 @@ type HcsCallback syscall.Handle type HcsProcessInformation struct { // ProcessId is the pid of the created process. ProcessId uint32 - reserved uint32 //nolint:structcheck + _ uint32 // reserved padding // StdInput is the handle associated with the stdin of the process. StdInput syscall.Handle // StdOutput is the handle associated with the stdout of the process. @@ -72,12 +76,28 @@ type HcsProcessInformation struct { } func execute(ctx gcontext.Context, timeout time.Duration, f func() error) error { + now := time.Now() if timeout > 0 { var cancel gcontext.CancelFunc ctx, cancel = gcontext.WithTimeout(ctx, timeout) defer cancel() } + // if ctx already has prior deadlines, the shortest timeout takes precedence and is used. + // find the true timeout for reporting + // + // this is mostly an issue with (*UtilityVM).Start(context.Context), which sets its + // own (2 minute) timeout. + deadline, ok := ctx.Deadline() + trueTimeout := timeout + if ok { + trueTimeout = deadline.Sub(now) + log.G(ctx).WithFields(logrus.Fields{ + logfields.Timeout: trueTimeout, + "desiredTimeout": timeout, + }).Trace("Executing syscall with deadline") + } + done := make(chan error, 1) go func() { done <- f() @@ -85,8 +105,10 @@ func execute(ctx gcontext.Context, timeout time.Duration, f func() error) error select { case <-ctx.Done(): if ctx.Err() == gcontext.DeadlineExceeded { - log.G(ctx).WithField(logfields.Timeout, timeout). - Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. If it appears to be making no forward progress, obtain the stacks and see if there is a syscall stuck in the platform API for a significant length of time.") + log.G(ctx).WithField(logfields.Timeout, trueTimeout). + Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. 
" + + "If it appears to be making no forward progress, obtain the stacks and see if there is a syscall " + + "stuck in the platform API for a significant length of time.") } return ctx.Err() case err := <-done: @@ -95,7 +117,7 @@ func execute(ctx gcontext.Context, timeout time.Duration, f func() error) error } func HcsEnumerateComputeSystems(ctx gcontext.Context, query string) (computeSystems, result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsEnumerateComputeSystems") + ctx, span := oc.StartSpan(ctx, "HcsEnumerateComputeSystems") defer span.End() defer func() { if result != "" { @@ -122,7 +144,7 @@ func HcsEnumerateComputeSystems(ctx gcontext.Context, query string) (computeSyst } func HcsCreateComputeSystem(ctx gcontext.Context, id string, configuration string, identity syscall.Handle) (computeSystem HcsSystem, result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsCreateComputeSystem") + ctx, span := oc.StartSpan(ctx, "HcsCreateComputeSystem") defer span.End() defer func() { if result != "" { @@ -147,7 +169,7 @@ func HcsCreateComputeSystem(ctx gcontext.Context, id string, configuration strin } func HcsOpenComputeSystem(ctx gcontext.Context, id string) (computeSystem HcsSystem, result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsOpenComputeSystem") + ctx, span := oc.StartSpan(ctx, "HcsOpenComputeSystem") defer span.End() defer func() { if result != "" { @@ -167,7 +189,7 @@ func HcsOpenComputeSystem(ctx gcontext.Context, id string) (computeSystem HcsSys } func HcsCloseComputeSystem(ctx gcontext.Context, computeSystem HcsSystem) (hr error) { - ctx, span := trace.StartSpan(ctx, "HcsCloseComputeSystem") + ctx, span := oc.StartSpan(ctx, "HcsCloseComputeSystem") defer span.End() defer func() { oc.SetSpanStatus(span, hr) }() @@ -177,7 +199,7 @@ func HcsCloseComputeSystem(ctx gcontext.Context, computeSystem HcsSystem) (hr er } func HcsStartComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsStartComputeSystem") + ctx, span := oc.StartSpan(ctx, "HcsStartComputeSystem") defer span.End() defer func() { if result != "" { @@ -200,7 +222,7 @@ func HcsStartComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, option } func HcsShutdownComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsShutdownComputeSystem") + ctx, span := oc.StartSpan(ctx, "HcsShutdownComputeSystem") defer span.End() defer func() { if result != "" { @@ -223,7 +245,7 @@ func HcsShutdownComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, opt } func HcsTerminateComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsTerminateComputeSystem") + ctx, span := oc.StartSpan(ctx, "HcsTerminateComputeSystem") defer span.End() defer func() { if result != "" { @@ -246,7 +268,7 @@ func HcsTerminateComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, op } func HcsPauseComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsPauseComputeSystem") + ctx, span := oc.StartSpan(ctx, "HcsPauseComputeSystem") defer span.End() defer func() { if result != "" { @@ -269,7 +291,7 @@ func HcsPauseComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, option } func HcsResumeComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, 
options string) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsResumeComputeSystem") + ctx, span := oc.StartSpan(ctx, "HcsResumeComputeSystem") defer span.End() defer func() { if result != "" { @@ -292,7 +314,7 @@ func HcsResumeComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, optio } func HcsGetComputeSystemProperties(ctx gcontext.Context, computeSystem HcsSystem, propertyQuery string) (properties, result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsGetComputeSystemProperties") + ctx, span := oc.StartSpan(ctx, "HcsGetComputeSystemProperties") defer span.End() defer func() { if result != "" { @@ -319,7 +341,7 @@ func HcsGetComputeSystemProperties(ctx gcontext.Context, computeSystem HcsSystem } func HcsModifyComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, configuration string) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsModifyComputeSystem") + ctx, span := oc.StartSpan(ctx, "HcsModifyComputeSystem") defer span.End() defer func() { if result != "" { @@ -340,7 +362,7 @@ func HcsModifyComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, confi } func HcsModifyServiceSettings(ctx gcontext.Context, settings string) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsModifyServiceSettings") + ctx, span := oc.StartSpan(ctx, "HcsModifyServiceSettings") defer span.End() defer func() { if result != "" { @@ -361,7 +383,7 @@ func HcsModifyServiceSettings(ctx gcontext.Context, settings string) (result str } func HcsRegisterComputeSystemCallback(ctx gcontext.Context, computeSystem HcsSystem, callback uintptr, context uintptr) (callbackHandle HcsCallback, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsRegisterComputeSystemCallback") + ctx, span := oc.StartSpan(ctx, "HcsRegisterComputeSystemCallback") defer span.End() defer func() { oc.SetSpanStatus(span, hr) }() @@ -371,7 +393,7 @@ func HcsRegisterComputeSystemCallback(ctx gcontext.Context, computeSystem HcsSys } func HcsUnregisterComputeSystemCallback(ctx gcontext.Context, callbackHandle HcsCallback) (hr error) { - ctx, span := trace.StartSpan(ctx, "HcsUnregisterComputeSystemCallback") + ctx, span := oc.StartSpan(ctx, "HcsUnregisterComputeSystemCallback") defer span.End() defer func() { oc.SetSpanStatus(span, hr) }() @@ -381,7 +403,7 @@ func HcsUnregisterComputeSystemCallback(ctx gcontext.Context, callbackHandle Hcs } func HcsCreateProcess(ctx gcontext.Context, computeSystem HcsSystem, processParameters string) (processInformation HcsProcessInformation, process HcsProcess, result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsCreateProcess") + ctx, span := oc.StartSpan(ctx, "HcsCreateProcess") defer span.End() defer func() { if result != "" { @@ -389,7 +411,12 @@ func HcsCreateProcess(ctx gcontext.Context, computeSystem HcsSystem, processPara } oc.SetSpanStatus(span, hr) }() - span.AddAttributes(trace.StringAttribute("processParameters", processParameters)) + if span.IsRecordingEvents() { + // wont handle v1 process parameters + if s, err := log.ScrubProcessParameters(processParameters); err == nil { + span.AddAttributes(trace.StringAttribute("processParameters", s)) + } + } return processInformation, process, result, execute(ctx, timeout.SyscallWatcher, func() error { var resultp *uint16 @@ -402,7 +429,7 @@ func HcsCreateProcess(ctx gcontext.Context, computeSystem HcsSystem, processPara } func HcsOpenProcess(ctx gcontext.Context, computeSystem HcsSystem, pid uint32) (process HcsProcess, result string, hr error) { - ctx, span := 
trace.StartSpan(ctx, "HcsOpenProcess") + ctx, span := oc.StartSpan(ctx, "HcsOpenProcess") defer span.End() defer func() { if result != "" { @@ -423,7 +450,7 @@ func HcsOpenProcess(ctx gcontext.Context, computeSystem HcsSystem, pid uint32) ( } func HcsCloseProcess(ctx gcontext.Context, process HcsProcess) (hr error) { - ctx, span := trace.StartSpan(ctx, "HcsCloseProcess") + ctx, span := oc.StartSpan(ctx, "HcsCloseProcess") defer span.End() defer func() { oc.SetSpanStatus(span, hr) }() @@ -433,7 +460,7 @@ func HcsCloseProcess(ctx gcontext.Context, process HcsProcess) (hr error) { } func HcsTerminateProcess(ctx gcontext.Context, process HcsProcess) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsTerminateProcess") + ctx, span := oc.StartSpan(ctx, "HcsTerminateProcess") defer span.End() defer func() { if result != "" { @@ -453,7 +480,7 @@ func HcsTerminateProcess(ctx gcontext.Context, process HcsProcess) (result strin } func HcsSignalProcess(ctx gcontext.Context, process HcsProcess, options string) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsSignalProcess") + ctx, span := oc.StartSpan(ctx, "HcsSignalProcess") defer span.End() defer func() { if result != "" { @@ -474,7 +501,7 @@ func HcsSignalProcess(ctx gcontext.Context, process HcsProcess, options string) } func HcsGetProcessInfo(ctx gcontext.Context, process HcsProcess) (processInformation HcsProcessInformation, result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsGetProcessInfo") + ctx, span := oc.StartSpan(ctx, "HcsGetProcessInfo") defer span.End() defer func() { if result != "" { @@ -494,7 +521,7 @@ func HcsGetProcessInfo(ctx gcontext.Context, process HcsProcess) (processInforma } func HcsGetProcessProperties(ctx gcontext.Context, process HcsProcess) (processProperties, result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsGetProcessProperties") + ctx, span := oc.StartSpan(ctx, "HcsGetProcessProperties") defer span.End() defer func() { if result != "" { @@ -520,7 +547,7 @@ func HcsGetProcessProperties(ctx gcontext.Context, process HcsProcess) (processP } func HcsModifyProcess(ctx gcontext.Context, process HcsProcess, settings string) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsModifyProcess") + ctx, span := oc.StartSpan(ctx, "HcsModifyProcess") defer span.End() defer func() { if result != "" { @@ -541,7 +568,7 @@ func HcsModifyProcess(ctx gcontext.Context, process HcsProcess, settings string) } func HcsGetServiceProperties(ctx gcontext.Context, propertyQuery string) (properties, result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsGetServiceProperties") + ctx, span := oc.StartSpan(ctx, "HcsGetServiceProperties") defer span.End() defer func() { if result != "" { @@ -568,7 +595,7 @@ func HcsGetServiceProperties(ctx gcontext.Context, propertyQuery string) (proper } func HcsRegisterProcessCallback(ctx gcontext.Context, process HcsProcess, callback uintptr, context uintptr) (callbackHandle HcsCallback, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsRegisterProcessCallback") + ctx, span := oc.StartSpan(ctx, "HcsRegisterProcessCallback") defer span.End() defer func() { oc.SetSpanStatus(span, hr) }() @@ -578,7 +605,7 @@ func HcsRegisterProcessCallback(ctx gcontext.Context, process HcsProcess, callba } func HcsUnregisterProcessCallback(ctx gcontext.Context, callbackHandle HcsCallback) (hr error) { - ctx, span := trace.StartSpan(ctx, "HcsUnregisterProcessCallback") + ctx, span := oc.StartSpan(ctx, "HcsUnregisterProcessCallback") defer 
span.End() defer func() { oc.SetSpanStatus(span, hr) }() @@ -588,7 +615,7 @@ func HcsUnregisterProcessCallback(ctx gcontext.Context, callbackHandle HcsCallba } func HcsSaveComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options string) (result string, hr error) { - ctx, span := trace.StartSpan(ctx, "HcsSaveComputeSystem") + ctx, span := oc.StartSpan(ctx, "HcsSaveComputeSystem") defer span.End() defer func() { if result != "" { diff --git a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go index cae55058deb81..42368872b733b 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/vmcompute/zsyscall_windows.go @@ -1,4 +1,6 @@ -// Code generated mksyscall_windows.exe DO NOT EDIT +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. package vmcompute @@ -19,6 +21,7 @@ const ( var ( errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL ) // errnoErr returns common boxed Errno values, to prevent @@ -26,7 +29,7 @@ var ( func errnoErr(e syscall.Errno) error { switch e { case 0: - return nil + return errERROR_EINVAL case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } @@ -39,48 +42,55 @@ func errnoErr(e syscall.Errno) error { var ( modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") - procHcsEnumerateComputeSystems = modvmcompute.NewProc("HcsEnumerateComputeSystems") - procHcsCreateComputeSystem = modvmcompute.NewProc("HcsCreateComputeSystem") - procHcsOpenComputeSystem = modvmcompute.NewProc("HcsOpenComputeSystem") procHcsCloseComputeSystem = modvmcompute.NewProc("HcsCloseComputeSystem") - procHcsStartComputeSystem = modvmcompute.NewProc("HcsStartComputeSystem") - procHcsShutdownComputeSystem = modvmcompute.NewProc("HcsShutdownComputeSystem") - procHcsTerminateComputeSystem = modvmcompute.NewProc("HcsTerminateComputeSystem") - procHcsPauseComputeSystem = modvmcompute.NewProc("HcsPauseComputeSystem") - procHcsResumeComputeSystem = modvmcompute.NewProc("HcsResumeComputeSystem") + procHcsCloseProcess = modvmcompute.NewProc("HcsCloseProcess") + procHcsCreateComputeSystem = modvmcompute.NewProc("HcsCreateComputeSystem") + procHcsCreateProcess = modvmcompute.NewProc("HcsCreateProcess") + procHcsEnumerateComputeSystems = modvmcompute.NewProc("HcsEnumerateComputeSystems") procHcsGetComputeSystemProperties = modvmcompute.NewProc("HcsGetComputeSystemProperties") + procHcsGetProcessInfo = modvmcompute.NewProc("HcsGetProcessInfo") + procHcsGetProcessProperties = modvmcompute.NewProc("HcsGetProcessProperties") + procHcsGetServiceProperties = modvmcompute.NewProc("HcsGetServiceProperties") procHcsModifyComputeSystem = modvmcompute.NewProc("HcsModifyComputeSystem") + procHcsModifyProcess = modvmcompute.NewProc("HcsModifyProcess") procHcsModifyServiceSettings = modvmcompute.NewProc("HcsModifyServiceSettings") + procHcsOpenComputeSystem = modvmcompute.NewProc("HcsOpenComputeSystem") + procHcsOpenProcess = modvmcompute.NewProc("HcsOpenProcess") + procHcsPauseComputeSystem = modvmcompute.NewProc("HcsPauseComputeSystem") procHcsRegisterComputeSystemCallback = modvmcompute.NewProc("HcsRegisterComputeSystemCallback") - procHcsUnregisterComputeSystemCallback = modvmcompute.NewProc("HcsUnregisterComputeSystemCallback") + procHcsRegisterProcessCallback = 
modvmcompute.NewProc("HcsRegisterProcessCallback") + procHcsResumeComputeSystem = modvmcompute.NewProc("HcsResumeComputeSystem") procHcsSaveComputeSystem = modvmcompute.NewProc("HcsSaveComputeSystem") - procHcsCreateProcess = modvmcompute.NewProc("HcsCreateProcess") - procHcsOpenProcess = modvmcompute.NewProc("HcsOpenProcess") - procHcsCloseProcess = modvmcompute.NewProc("HcsCloseProcess") - procHcsTerminateProcess = modvmcompute.NewProc("HcsTerminateProcess") + procHcsShutdownComputeSystem = modvmcompute.NewProc("HcsShutdownComputeSystem") procHcsSignalProcess = modvmcompute.NewProc("HcsSignalProcess") - procHcsGetProcessInfo = modvmcompute.NewProc("HcsGetProcessInfo") - procHcsGetProcessProperties = modvmcompute.NewProc("HcsGetProcessProperties") - procHcsModifyProcess = modvmcompute.NewProc("HcsModifyProcess") - procHcsGetServiceProperties = modvmcompute.NewProc("HcsGetServiceProperties") - procHcsRegisterProcessCallback = modvmcompute.NewProc("HcsRegisterProcessCallback") + procHcsStartComputeSystem = modvmcompute.NewProc("HcsStartComputeSystem") + procHcsTerminateComputeSystem = modvmcompute.NewProc("HcsTerminateComputeSystem") + procHcsTerminateProcess = modvmcompute.NewProc("HcsTerminateProcess") + procHcsUnregisterComputeSystemCallback = modvmcompute.NewProc("HcsUnregisterComputeSystemCallback") procHcsUnregisterProcessCallback = modvmcompute.NewProc("HcsUnregisterProcessCallback") ) -func hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) +func hcsCloseComputeSystem(computeSystem HcsSystem) (hr error) { + hr = procHcsCloseComputeSystem.Find() if hr != nil { return } - return _hcsEnumerateComputeSystems(_p0, computeSystems, result) + r0, _, _ := syscall.Syscall(procHcsCloseComputeSystem.Addr(), 1, uintptr(computeSystem), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return } -func _hcsEnumerateComputeSystems(query *uint16, computeSystems **uint16, result **uint16) (hr error) { - if hr = procHcsEnumerateComputeSystems.Find(); hr != nil { +func hcsCloseProcess(process HcsProcess) (hr error) { + hr = procHcsCloseProcess.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsEnumerateComputeSystems.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(computeSystems)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.Syscall(procHcsCloseProcess.Addr(), 1, uintptr(process), 0, 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -105,7 +115,8 @@ func hcsCreateComputeSystem(id string, configuration string, identity syscall.Ha } func _hcsCreateComputeSystem(id *uint16, configuration *uint16, identity syscall.Handle, computeSystem *HcsSystem, result **uint16) (hr error) { - if hr = procHcsCreateComputeSystem.Find(); hr != nil { + hr = procHcsCreateComputeSystem.Find() + if hr != nil { return } r0, _, _ := syscall.Syscall6(procHcsCreateComputeSystem.Addr(), 5, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(configuration)), uintptr(identity), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)), 0) @@ -118,34 +129,21 @@ func _hcsCreateComputeSystem(id *uint16, configuration *uint16, identity syscall return } -func hcsOpenComputeSystem(id string, computeSystem *HcsSystem, result **uint16) (hr error) { +func hcsCreateProcess(computeSystem HcsSystem, processParameters string, processInformation *HcsProcessInformation, process *HcsProcess, result 
**uint16) (hr error) { var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) + _p0, hr = syscall.UTF16PtrFromString(processParameters) if hr != nil { return } - return _hcsOpenComputeSystem(_p0, computeSystem, result) -} - -func _hcsOpenComputeSystem(id *uint16, computeSystem *HcsSystem, result **uint16) (hr error) { - if hr = procHcsOpenComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsOpenComputeSystem.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return + return _hcsCreateProcess(computeSystem, _p0, processInformation, process, result) } -func hcsCloseComputeSystem(computeSystem HcsSystem) (hr error) { - if hr = procHcsCloseComputeSystem.Find(); hr != nil { +func _hcsCreateProcess(computeSystem HcsSystem, processParameters *uint16, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) { + hr = procHcsCreateProcess.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsCloseComputeSystem.Addr(), 1, uintptr(computeSystem), 0, 0) + r0, _, _ := syscall.Syscall6(procHcsCreateProcess.Addr(), 5, uintptr(computeSystem), uintptr(unsafe.Pointer(processParameters)), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -155,20 +153,21 @@ func hcsCloseComputeSystem(computeSystem HcsSystem) (hr error) { return } -func hcsStartComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { +func hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) { var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(options) + _p0, hr = syscall.UTF16PtrFromString(query) if hr != nil { return } - return _hcsStartComputeSystem(computeSystem, _p0, result) + return _hcsEnumerateComputeSystems(_p0, computeSystems, result) } -func _hcsStartComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { - if hr = procHcsStartComputeSystem.Find(); hr != nil { +func _hcsEnumerateComputeSystems(query *uint16, computeSystems **uint16, result **uint16) (hr error) { + hr = procHcsEnumerateComputeSystems.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsStartComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.Syscall(procHcsEnumerateComputeSystems.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(computeSystems)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -178,20 +177,21 @@ func _hcsStartComputeSystem(computeSystem HcsSystem, options *uint16, result **u return } -func hcsShutdownComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { +func hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) { var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(options) + _p0, hr = syscall.UTF16PtrFromString(propertyQuery) if hr != nil { return } - return _hcsShutdownComputeSystem(computeSystem, _p0, result) + return _hcsGetComputeSystemProperties(computeSystem, _p0, properties, result) } -func _hcsShutdownComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { - 
if hr = procHcsShutdownComputeSystem.Find(); hr != nil { +func _hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery *uint16, properties **uint16, result **uint16) (hr error) { + hr = procHcsGetComputeSystemProperties.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsShutdownComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.Syscall6(procHcsGetComputeSystemProperties.Addr(), 4, uintptr(computeSystem), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -201,20 +201,12 @@ func _hcsShutdownComputeSystem(computeSystem HcsSystem, options *uint16, result return } -func hcsTerminateComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(options) +func hcsGetProcessInfo(process HcsProcess, processInformation *HcsProcessInformation, result **uint16) (hr error) { + hr = procHcsGetProcessInfo.Find() if hr != nil { return } - return _hcsTerminateComputeSystem(computeSystem, _p0, result) -} - -func _hcsTerminateComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { - if hr = procHcsTerminateComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsTerminateComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.Syscall(procHcsGetProcessInfo.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -224,20 +216,12 @@ func _hcsTerminateComputeSystem(computeSystem HcsSystem, options *uint16, result return } -func hcsPauseComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(options) +func hcsGetProcessProperties(process HcsProcess, processProperties **uint16, result **uint16) (hr error) { + hr = procHcsGetProcessProperties.Find() if hr != nil { return } - return _hcsPauseComputeSystem(computeSystem, _p0, result) -} - -func _hcsPauseComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { - if hr = procHcsPauseComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsPauseComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.Syscall(procHcsGetProcessProperties.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processProperties)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -247,20 +231,21 @@ func _hcsPauseComputeSystem(computeSystem HcsSystem, options *uint16, result **u return } -func hcsResumeComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { +func hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) { var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(options) + _p0, hr = syscall.UTF16PtrFromString(propertyQuery) if hr != nil { return } - return _hcsResumeComputeSystem(computeSystem, _p0, result) + return _hcsGetServiceProperties(_p0, properties, result) } -func _hcsResumeComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { - 
if hr = procHcsResumeComputeSystem.Find(); hr != nil { +func _hcsGetServiceProperties(propertyQuery *uint16, properties **uint16, result **uint16) (hr error) { + hr = procHcsGetServiceProperties.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsResumeComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.Syscall(procHcsGetServiceProperties.Addr(), 3, uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -270,20 +255,21 @@ func _hcsResumeComputeSystem(computeSystem HcsSystem, options *uint16, result ** return } -func hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) { +func hcsModifyComputeSystem(computeSystem HcsSystem, configuration string, result **uint16) (hr error) { var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(propertyQuery) + _p0, hr = syscall.UTF16PtrFromString(configuration) if hr != nil { return } - return _hcsGetComputeSystemProperties(computeSystem, _p0, properties, result) + return _hcsModifyComputeSystem(computeSystem, _p0, result) } -func _hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery *uint16, properties **uint16, result **uint16) (hr error) { - if hr = procHcsGetComputeSystemProperties.Find(); hr != nil { +func _hcsModifyComputeSystem(computeSystem HcsSystem, configuration *uint16, result **uint16) (hr error) { + hr = procHcsModifyComputeSystem.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall6(procHcsGetComputeSystemProperties.Addr(), 4, uintptr(computeSystem), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) + r0, _, _ := syscall.Syscall(procHcsModifyComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(configuration)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -293,20 +279,21 @@ func _hcsGetComputeSystemProperties(computeSystem HcsSystem, propertyQuery *uint return } -func hcsModifyComputeSystem(computeSystem HcsSystem, configuration string, result **uint16) (hr error) { +func hcsModifyProcess(process HcsProcess, settings string, result **uint16) (hr error) { var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(configuration) + _p0, hr = syscall.UTF16PtrFromString(settings) if hr != nil { return } - return _hcsModifyComputeSystem(computeSystem, _p0, result) + return _hcsModifyProcess(process, _p0, result) } -func _hcsModifyComputeSystem(computeSystem HcsSystem, configuration *uint16, result **uint16) (hr error) { - if hr = procHcsModifyComputeSystem.Find(); hr != nil { +func _hcsModifyProcess(process HcsProcess, settings *uint16, result **uint16) (hr error) { + hr = procHcsModifyProcess.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsModifyComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(configuration)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.Syscall(procHcsModifyProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -326,7 +313,8 @@ func hcsModifyServiceSettings(settings string, result **uint16) (hr error) { } func _hcsModifyServiceSettings(settings *uint16, result **uint16) (hr error) { - if hr = 
procHcsModifyServiceSettings.Find(); hr != nil { + hr = procHcsModifyServiceSettings.Find() + if hr != nil { return } r0, _, _ := syscall.Syscall(procHcsModifyServiceSettings.Addr(), 2, uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)), 0) @@ -339,11 +327,21 @@ func _hcsModifyServiceSettings(settings *uint16, result **uint16) (hr error) { return } -func hcsRegisterComputeSystemCallback(computeSystem HcsSystem, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) { - if hr = procHcsRegisterComputeSystemCallback.Find(); hr != nil { +func hcsOpenComputeSystem(id string, computeSystem *HcsSystem, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(id) + if hr != nil { return } - r0, _, _ := syscall.Syscall6(procHcsRegisterComputeSystemCallback.Addr(), 4, uintptr(computeSystem), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0) + return _hcsOpenComputeSystem(_p0, computeSystem, result) +} + +func _hcsOpenComputeSystem(id *uint16, computeSystem *HcsSystem, result **uint16) (hr error) { + hr = procHcsOpenComputeSystem.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsOpenComputeSystem.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -353,11 +351,12 @@ func hcsRegisterComputeSystemCallback(computeSystem HcsSystem, callback uintptr, return } -func hcsUnregisterComputeSystemCallback(callbackHandle HcsCallback) (hr error) { - if hr = procHcsUnregisterComputeSystemCallback.Find(); hr != nil { +func hcsOpenProcess(computeSystem HcsSystem, pid uint32, process *HcsProcess, result **uint16) (hr error) { + hr = procHcsOpenProcess.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsUnregisterComputeSystemCallback.Addr(), 1, uintptr(callbackHandle), 0, 0) + r0, _, _ := syscall.Syscall6(procHcsOpenProcess.Addr(), 4, uintptr(computeSystem), uintptr(pid), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0, 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -367,20 +366,21 @@ func hcsUnregisterComputeSystemCallback(callbackHandle HcsCallback) (hr error) { return } -func hcsSaveComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { +func hcsPauseComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(options) if hr != nil { return } - return _hcsSaveComputeSystem(computeSystem, _p0, result) + return _hcsPauseComputeSystem(computeSystem, _p0, result) } -func _hcsSaveComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { - if hr = procHcsSaveComputeSystem.Find(); hr != nil { +func _hcsPauseComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + hr = procHcsPauseComputeSystem.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsSaveComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.Syscall(procHcsPauseComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -390,20 +390,27 @@ func _hcsSaveComputeSystem(computeSystem HcsSystem, options *uint16, result **ui return } -func hcsCreateProcess(computeSystem 
HcsSystem, processParameters string, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(processParameters) +func hcsRegisterComputeSystemCallback(computeSystem HcsSystem, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) { + hr = procHcsRegisterComputeSystemCallback.Find() if hr != nil { return } - return _hcsCreateProcess(computeSystem, _p0, processInformation, process, result) + r0, _, _ := syscall.Syscall6(procHcsRegisterComputeSystemCallback.Addr(), 4, uintptr(computeSystem), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return } -func _hcsCreateProcess(computeSystem HcsSystem, processParameters *uint16, processInformation *HcsProcessInformation, process *HcsProcess, result **uint16) (hr error) { - if hr = procHcsCreateProcess.Find(); hr != nil { +func hcsRegisterProcessCallback(process HcsProcess, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) { + hr = procHcsRegisterProcessCallback.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall6(procHcsCreateProcess.Addr(), 5, uintptr(computeSystem), uintptr(unsafe.Pointer(processParameters)), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0) + r0, _, _ := syscall.Syscall6(procHcsRegisterProcessCallback.Addr(), 4, uintptr(process), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -413,11 +420,21 @@ func _hcsCreateProcess(computeSystem HcsSystem, processParameters *uint16, proce return } -func hcsOpenProcess(computeSystem HcsSystem, pid uint32, process *HcsProcess, result **uint16) (hr error) { - if hr = procHcsOpenProcess.Find(); hr != nil { +func hcsResumeComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { return } - r0, _, _ := syscall.Syscall6(procHcsOpenProcess.Addr(), 4, uintptr(computeSystem), uintptr(pid), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0, 0) + return _hcsResumeComputeSystem(computeSystem, _p0, result) +} + +func _hcsResumeComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + hr = procHcsResumeComputeSystem.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsResumeComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -427,11 +444,21 @@ func hcsOpenProcess(computeSystem HcsSystem, pid uint32, process *HcsProcess, re return } -func hcsCloseProcess(process HcsProcess) (hr error) { - if hr = procHcsCloseProcess.Find(); hr != nil { +func hcsSaveComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsCloseProcess.Addr(), 1, uintptr(process), 0, 0) + return _hcsSaveComputeSystem(computeSystem, _p0, result) +} + +func _hcsSaveComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + hr = procHcsSaveComputeSystem.Find() + if hr != nil { + return + } + r0, _, _ := 
syscall.Syscall(procHcsSaveComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -441,11 +468,21 @@ func hcsCloseProcess(process HcsProcess) (hr error) { return } -func hcsTerminateProcess(process HcsProcess, result **uint16) (hr error) { - if hr = procHcsTerminateProcess.Find(); hr != nil { +func hcsShutdownComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsTerminateProcess.Addr(), 2, uintptr(process), uintptr(unsafe.Pointer(result)), 0) + return _hcsShutdownComputeSystem(computeSystem, _p0, result) +} + +func _hcsShutdownComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + hr = procHcsShutdownComputeSystem.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcsShutdownComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -465,7 +502,8 @@ func hcsSignalProcess(process HcsProcess, options string, result **uint16) (hr e } func _hcsSignalProcess(process HcsProcess, options *uint16, result **uint16) (hr error) { - if hr = procHcsSignalProcess.Find(); hr != nil { + hr = procHcsSignalProcess.Find() + if hr != nil { return } r0, _, _ := syscall.Syscall(procHcsSignalProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) @@ -478,25 +516,21 @@ func _hcsSignalProcess(process HcsProcess, options *uint16, result **uint16) (hr return } -func hcsGetProcessInfo(process HcsProcess, processInformation *HcsProcessInformation, result **uint16) (hr error) { - if hr = procHcsGetProcessInfo.Find(); hr != nil { +func hcsStartComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(options) + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsGetProcessInfo.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return + return _hcsStartComputeSystem(computeSystem, _p0, result) } -func hcsGetProcessProperties(process HcsProcess, processProperties **uint16, result **uint16) (hr error) { - if hr = procHcsGetProcessProperties.Find(); hr != nil { +func _hcsStartComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + hr = procHcsStartComputeSystem.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsGetProcessProperties.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processProperties)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.Syscall(procHcsStartComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -506,20 +540,21 @@ func hcsGetProcessProperties(process HcsProcess, processProperties **uint16, res return } -func hcsModifyProcess(process HcsProcess, settings string, result **uint16) (hr error) { +func hcsTerminateComputeSystem(computeSystem HcsSystem, options string, result **uint16) (hr error) { var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) + 
_p0, hr = syscall.UTF16PtrFromString(options) if hr != nil { return } - return _hcsModifyProcess(process, _p0, result) + return _hcsTerminateComputeSystem(computeSystem, _p0, result) } -func _hcsModifyProcess(process HcsProcess, settings *uint16, result **uint16) (hr error) { - if hr = procHcsModifyProcess.Find(); hr != nil { +func _hcsTerminateComputeSystem(computeSystem HcsSystem, options *uint16, result **uint16) (hr error) { + hr = procHcsTerminateComputeSystem.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsModifyProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.Syscall(procHcsTerminateComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -529,20 +564,12 @@ func _hcsModifyProcess(process HcsProcess, settings *uint16, result **uint16) (h return } -func hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(propertyQuery) +func hcsTerminateProcess(process HcsProcess, result **uint16) (hr error) { + hr = procHcsTerminateProcess.Find() if hr != nil { return } - return _hcsGetServiceProperties(_p0, properties, result) -} - -func _hcsGetServiceProperties(propertyQuery *uint16, properties **uint16, result **uint16) (hr error) { - if hr = procHcsGetServiceProperties.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsGetServiceProperties.Addr(), 3, uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result))) + r0, _, _ := syscall.Syscall(procHcsTerminateProcess.Addr(), 2, uintptr(process), uintptr(unsafe.Pointer(result)), 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -552,11 +579,12 @@ func _hcsGetServiceProperties(propertyQuery *uint16, properties **uint16, result return } -func hcsRegisterProcessCallback(process HcsProcess, callback uintptr, context uintptr, callbackHandle *HcsCallback) (hr error) { - if hr = procHcsRegisterProcessCallback.Find(); hr != nil { +func hcsUnregisterComputeSystemCallback(callbackHandle HcsCallback) (hr error) { + hr = procHcsUnregisterComputeSystemCallback.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall6(procHcsRegisterProcessCallback.Addr(), 4, uintptr(process), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0) + r0, _, _ := syscall.Syscall(procHcsUnregisterComputeSystemCallback.Addr(), 1, uintptr(callbackHandle), 0, 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -567,7 +595,8 @@ func hcsRegisterProcessCallback(process HcsProcess, callback uintptr, context ui } func hcsUnregisterProcessCallback(callbackHandle HcsCallback) (hr error) { - if hr = procHcsUnregisterProcessCallback.Find(); hr != nil { + hr = procHcsUnregisterProcessCallback.Find() + if hr != nil { return } r0, _, _ := syscall.Syscall(procHcsUnregisterProcessCallback.Addr(), 1, uintptr(callbackHandle), 0, 0) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go index 5debe974d41fb..e12253c9473f9 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/activatelayer.go @@ -1,3 +1,5 @@ +//go:build windows + package wclayer import ( @@ 
-14,7 +16,7 @@ import ( // An activated layer must later be deactivated via DeactivateLayer. func ActivateLayer(ctx context.Context, path string) (err error) { title := "hcsshim::ActivateLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes(trace.StringAttribute("path", path)) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerreader.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerreader.go new file mode 100644 index 0000000000000..ec4423effe729 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerreader.go @@ -0,0 +1,216 @@ +package wclayer + +import ( + "errors" + "io" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/hcsshim/internal/longpath" + "github.com/Microsoft/hcsshim/internal/oc" + "go.opencensus.io/trace" +) + +type baseLayerReader struct { + s *trace.Span + root string + result chan *fileEntry + proceed chan bool + currentFile *os.File + backupReader *winio.BackupFileReader +} + +func newBaseLayerReader(root string, s *trace.Span) (r *baseLayerReader) { + r = &baseLayerReader{ + s: s, + root: root, + result: make(chan *fileEntry), + proceed: make(chan bool), + } + go r.walk() + return r +} + +func (r *baseLayerReader) walkUntilCancelled() error { + root, err := longpath.LongAbs(r.root) + if err != nil { + return err + } + + r.root = root + + err = filepath.Walk(filepath.Join(r.root, filesPath), func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Indirect fix for https://github.com/moby/moby/issues/32838#issuecomment-343610048. + // Handle failure from what may be a golang bug in the conversion of + // UTF16 to UTF8 in files which are left in the recycle bin. Os.Lstat + // which is called by filepath.Walk will fail when a filename contains + // unicode characters. Skip the recycle bin regardless which is goodness. 
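+ // Each entry discovered below is handed to the consumer over r.result; the walk + // then blocks until Next or Close replies on r.proceed (a false reply cancels + // the walk via errorIterationCanceled).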
+ if strings.EqualFold(path, filepath.Join(r.root, `Files\$Recycle.Bin`)) && info.IsDir() { + return filepath.SkipDir + } + + r.result <- &fileEntry{path, info, nil} + if !<-r.proceed { + return errorIterationCanceled + } + + return nil + }) + + if err == errorIterationCanceled { + return nil + } + + if err != nil { + return err + } + + utilityVMAbsPath := filepath.Join(r.root, utilityVMPath) + utilityVMFilesAbsPath := filepath.Join(r.root, utilityVMFilesPath) + + // Ignore a UtilityVM without Files, that's not _really_ a UtiltyVM + if _, err = os.Lstat(utilityVMFilesAbsPath); err != nil { + if os.IsNotExist(err) { + return io.EOF + } + return err + } + + err = filepath.Walk(utilityVMAbsPath, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if path != utilityVMAbsPath && path != utilityVMFilesAbsPath && !hasPathPrefix(path, utilityVMFilesAbsPath) { + if info.IsDir() { + return filepath.SkipDir + } + return nil + } + + r.result <- &fileEntry{path, info, nil} + if !<-r.proceed { + return errorIterationCanceled + } + + return nil + }) + + if err == errorIterationCanceled { + return nil + } + + if err != nil { + return err + } + + return io.EOF +} + +func (r *baseLayerReader) walk() { + defer close(r.result) + if !<-r.proceed { + return + } + + err := r.walkUntilCancelled() + if err != nil { + for { + r.result <- &fileEntry{err: err} + if !<-r.proceed { + return + } + } + } +} + +func (r *baseLayerReader) reset() { + if r.backupReader != nil { + r.backupReader.Close() + r.backupReader = nil + } + if r.currentFile != nil { + r.currentFile.Close() + r.currentFile = nil + } +} + +func (r *baseLayerReader) Next() (path string, size int64, fileInfo *winio.FileBasicInfo, err error) { + r.reset() + r.proceed <- true + fe := <-r.result + if fe == nil { + err = errors.New("BaseLayerReader closed") + return + } + if fe.err != nil { + err = fe.err + return + } + + path, err = filepath.Rel(r.root, fe.path) + if err != nil { + return + } + + f, err := openFileOrDir(fe.path, syscall.GENERIC_READ, syscall.OPEN_EXISTING) + if err != nil { + return + } + defer func() { + if f != nil { + f.Close() + } + }() + + fileInfo, err = winio.GetFileBasicInfo(f) + if err != nil { + return + } + + size = fe.fi.Size() + r.backupReader = winio.NewBackupFileReader(f, true) + + r.currentFile = f + f = nil + return +} + +func (r *baseLayerReader) LinkInfo() (uint32, *winio.FileIDInfo, error) { + fileStandardInfo, err := winio.GetFileStandardInfo(r.currentFile) + if err != nil { + return 0, nil, err + } + fileIDInfo, err := winio.GetFileID(r.currentFile) + if err != nil { + return 0, nil, err + } + return fileStandardInfo.NumberOfLinks, fileIDInfo, nil +} + +func (r *baseLayerReader) Read(b []byte) (int, error) { + if r.backupReader == nil { + return 0, io.EOF + } + return r.backupReader.Read(b) +} + +func (r *baseLayerReader) Close() (err error) { + defer r.s.End() + defer func() { + oc.SetSpanStatus(r.s, err) + close(r.proceed) + }() + r.proceed <- false + // The r.result channel will be closed once walk() returns + <-r.result + r.reset() + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerwriter.go similarity index 99% rename from vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go rename to vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerwriter.go index 3ec708d1ed35c..aea8b421efdbc 100644 --- 
a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerwriter.go @@ -1,3 +1,5 @@ +//go:build windows + package wclayer import ( @@ -48,7 +50,6 @@ func reapplyDirectoryTimes(root *os.File, dis []dirInfo) error { if err != nil { return err } - } return nil } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/converttobaselayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/converttobaselayer.go new file mode 100644 index 0000000000000..ceb3b508354a6 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/converttobaselayer.go @@ -0,0 +1,158 @@ +package wclayer + +import ( + "context" + "fmt" + "os" + "path/filepath" + "syscall" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/longpath" + "github.com/Microsoft/hcsshim/internal/oc" + "github.com/Microsoft/hcsshim/internal/safefile" + "github.com/Microsoft/hcsshim/internal/winapi" + "github.com/pkg/errors" + "go.opencensus.io/trace" + "golang.org/x/sys/windows" +) + +var hiveNames = []string{"DEFAULT", "SAM", "SECURITY", "SOFTWARE", "SYSTEM"} + +// Ensure the given file exists as an ordinary file, and create a minimal hive file if not. +func ensureHive(path string, root *os.File) (err error) { + _, err = safefile.LstatRelative(path, root) + if err != nil && !os.IsNotExist(err) { + return fmt.Errorf("accessing %s: %w", path, err) + } + + version := windows.RtlGetVersion() + if version == nil { + return fmt.Errorf("failed to get OS version") + } + + var fullPath string + fullPath, err = longpath.LongAbs(filepath.Join(root.Name(), path)) + if err != nil { + return fmt.Errorf("getting path: %w", err) + } + + var key syscall.Handle + err = winapi.ORCreateHive(&key) + if err != nil { + return fmt.Errorf("creating hive: %w", err) + } + + defer func() { + closeErr := winapi.ORCloseHive(key) + if closeErr != nil && err == nil { + err = fmt.Errorf("closing hive key: %w", closeErr) + } + }() + + err = winapi.ORSaveHive(key, fullPath, version.MajorVersion, version.MinorVersion) + if err != nil { + return fmt.Errorf("saving hive: %w", err) + } + + return nil +} + +func ensureBaseLayer(root *os.File) (hasUtilityVM bool, err error) { + // The base layer registry hives will be copied from here + const hiveSourcePath = "Files\\Windows\\System32\\config" + if err = safefile.MkdirAllRelative(hiveSourcePath, root); err != nil { + return + } + + for _, hiveName := range hiveNames { + hivePath := filepath.Join(hiveSourcePath, hiveName) + if err = ensureHive(hivePath, root); err != nil { + return + } + } + + stat, err := safefile.LstatRelative(utilityVMFilesPath, root) + + if os.IsNotExist(err) { + return false, nil + } + + if err != nil { + return + } + + if !stat.Mode().IsDir() { + fullPath := filepath.Join(root.Name(), utilityVMFilesPath) + return false, errors.Errorf("%s has unexpected file mode %s", fullPath, stat.Mode().String()) + } + + const bcdRelativePath = "EFI\\Microsoft\\Boot\\BCD" + + // Just check that this exists as a regular file. If it exists but is not a valid registry hive, + // ProcessUtilityVMImage will complain: + // "The registry could not read in, or write out, or flush, one of the files that contain the system's image of the registry." 
+ bcdPath := filepath.Join(utilityVMFilesPath, bcdRelativePath) + + stat, err = safefile.LstatRelative(bcdPath, root) + if err != nil { + return false, errors.Wrapf(err, "UtilityVM must contain '%s'", bcdRelativePath) + } + + if !stat.Mode().IsRegular() { + fullPath := filepath.Join(root.Name(), bcdPath) + return false, errors.Errorf("%s has unexpected file mode %s", fullPath, stat.Mode().String()) + } + + return true, nil +} + +func convertToBaseLayer(ctx context.Context, root *os.File) error { + hasUtilityVM, err := ensureBaseLayer(root) + + if err != nil { + return err + } + + if err := ProcessBaseLayer(ctx, root.Name()); err != nil { + return err + } + + if !hasUtilityVM { + return nil + } + + err = safefile.EnsureNotReparsePointRelative(utilityVMPath, root) + if err != nil { + return err + } + + utilityVMPath := filepath.Join(root.Name(), utilityVMPath) + return ProcessUtilityVMImage(ctx, utilityVMPath) +} + +// ConvertToBaseLayer processes a candidate base layer, i.e. a directory +// containing the desired file content under Files/, and optionally the +// desired file content for a UtilityVM under UtilityVM/Files/ +func ConvertToBaseLayer(ctx context.Context, path string) (err error) { + title := "hcsshim::ConvertToBaseLayer" + ctx, span := trace.StartSpan(ctx, title) + defer span.End() + defer func() { oc.SetSpanStatus(span, err) }() + span.AddAttributes(trace.StringAttribute("path", path)) + + root, err := safefile.OpenRoot(path) + if err != nil { + return hcserror.New(err, title+" - failed", "") + } + defer func() { + if err2 := root.Close(); err == nil && err2 != nil { + err = hcserror.New(err2, title+" - failed", "") + } + }() + + if err = convertToBaseLayer(ctx, root); err != nil { + return hcserror.New(err, title+" - failed", "") + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go index 480aee8725caf..932475723ae64 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createlayer.go @@ -1,3 +1,5 @@ +//go:build windows + package wclayer import ( @@ -12,7 +14,7 @@ import ( // the parent layer provided. 
func CreateLayer(ctx context.Context, path, parent string) (err error) { title := "hcsshim::CreateLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes( diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go index 131aa94f14b8f..5c9d5d2507ea8 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/createscratchlayer.go @@ -1,3 +1,5 @@ +//go:build windows + package wclayer import ( @@ -13,7 +15,7 @@ import ( // This requires the full list of paths to all parent layers up to the base func CreateScratchLayer(ctx context.Context, path string, parentLayerPaths []string) (err error) { title := "hcsshim::CreateScratchLayer" - ctx, span := trace.StartSpan(ctx, title) + ctx, span := oc.StartSpan(ctx, title) defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes( diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go index d5bf2f5bdc5f7..e3bc77cbc83a3 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/deactivatelayer.go @@ -1,3 +1,5 @@ +//go:build windows + package wclayer import ( @@ -11,7 +13,7 @@ import ( // DeactivateLayer will dismount a layer that was mounted via ActivateLayer. func DeactivateLayer(ctx context.Context, path string) (err error) { title := "hcsshim::DeactivateLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes(trace.StringAttribute("path", path)) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go index 424467ac33107..d0a59efe12835 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/destroylayer.go @@ -1,3 +1,5 @@ +//go:build windows + package wclayer import ( @@ -12,7 +14,7 @@ import ( // path, including that layer's containing folder, if any. func DestroyLayer(ctx context.Context, path string) (err error) { title := "hcsshim::DestroyLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes(trace.StringAttribute("path", path)) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/doc.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/doc.go new file mode 100644 index 0000000000000..dd1d55580436e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/doc.go @@ -0,0 +1,4 @@ +// Package wclayer provides bindings to HCS's legacy layer management API and +// provides a higher level interface around these calls for container layer +// management. 
+package wclayer diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go index 035c9041e6883..e2ec27ad08fe9 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go @@ -1,3 +1,5 @@ +//go:build windows + package wclayer import ( @@ -16,7 +18,7 @@ import ( // ExpandScratchSize expands the size of a layer to at least size bytes. func ExpandScratchSize(ctx context.Context, path string, size uint64) (err error) { title := "hcsshim::ExpandScratchSize" - ctx, span := trace.StartSpan(ctx, title) + ctx, span := oc.StartSpan(ctx, title) defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes( diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go index 97b27eb7d6b8a..d4c677aabf514 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/exportlayer.go @@ -1,8 +1,9 @@ +//go:build windows + package wclayer import ( "context" - "io/ioutil" "os" "strings" @@ -19,7 +20,7 @@ import ( // perform the export. func ExportLayer(ctx context.Context, path string, exportFolderPath string, parentLayerPaths []string) (err error) { title := "hcsshim::ExportLayer" - ctx, span := trace.StartSpan(ctx, title) + ctx, span := oc.StartSpan(ctx, title) defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes( @@ -40,9 +41,16 @@ func ExportLayer(ctx context.Context, path string, exportFolderPath string, pare return nil } +// LayerReader is an interface that supports reading an existing container image layer. type LayerReader interface { + // Next advances to the next file and returns the name, size, and file info Next() (string, int64, *winio.FileBasicInfo, error) + // LinkInfo returns the number of links and the file identifier for the current file. + LinkInfo() (uint32, *winio.FileIDInfo, error) + // Read reads data from the current file, in the format of a Win32 backup stream, and + // returns the number of bytes read. Read(b []byte) (int, error) + // Close finishes the layer reading process and releases any resources. Close() error } @@ -50,7 +58,7 @@ type LayerReader interface { // The caller must have taken the SeBackupPrivilege privilege // to call this and any methods on the resulting LayerReader. func NewLayerReader(ctx context.Context, path string, parentLayerPaths []string) (_ LayerReader, err error) { - ctx, span := trace.StartSpan(ctx, "hcsshim::NewLayerReader") + ctx, span := oc.StartSpan(ctx, "hcsshim::NewLayerReader") defer func() { if err != nil { oc.SetSpanStatus(span, err) @@ -61,7 +69,12 @@ func NewLayerReader(ctx context.Context, path string, parentLayerPaths []string) trace.StringAttribute("path", path), trace.StringAttribute("parentLayerPaths", strings.Join(parentLayerPaths, ", "))) - exportPath, err := ioutil.TempDir("", "hcs") + if len(parentLayerPaths) == 0 { + // This is a base layer. It gets exported differently. 
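+ // Rather than staging an export under a temporary directory (the path taken + // below for layers with parents), a base layer is read in place by + // baseLayerReader, which walks its Files\ and UtilityVM\Files\ trees.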
+ return newBaseLayerReader(path, span), nil + } + + exportPath, err := os.MkdirTemp("", "hcs") if err != nil { return nil, err } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go index 8d213f5871af6..715e06e379630 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getlayermountpath.go @@ -1,3 +1,5 @@ +//go:build windows + package wclayer import ( @@ -16,7 +18,7 @@ import ( // folder path at which the layer is stored. func GetLayerMountPath(ctx context.Context, path string) (_ string, err error) { title := "hcsshim::GetLayerMountPath" - ctx, span := trace.StartSpan(ctx, title) + ctx, span := oc.StartSpan(ctx, title) defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes(trace.StringAttribute("path", path)) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go index ae1fff8403639..5e400fb2094d9 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/getsharedbaseimages.go @@ -1,3 +1,5 @@ +//go:build windows + package wclayer import ( @@ -14,7 +16,7 @@ import ( // of registering them with the graphdriver, graph, and tagstore. func GetSharedBaseImages(ctx context.Context) (_ string, err error) { title := "hcsshim::GetSharedBaseImages" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck defer span.End() defer func() { oc.SetSpanStatus(span, err) }() diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go index 4b282fef9dba2..20217ed81b163 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/grantvmaccess.go @@ -1,3 +1,5 @@ +//go:build windows + package wclayer import ( @@ -11,7 +13,7 @@ import ( // GrantVmAccess adds access to a file for a given VM func GrantVmAccess(ctx context.Context, vmid string, filepath string) (err error) { title := "hcsshim::GrantVmAccess" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes( diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go index 687550f0be3bd..50f669a261c54 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/importlayer.go @@ -1,8 +1,9 @@ +//go:build windows + package wclayer import ( "context" - "io/ioutil" "os" "path/filepath" "strings" @@ -20,7 +21,7 @@ import ( // be present on the system at the paths provided in parentLayerPaths. 
func ImportLayer(ctx context.Context, path string, importFolderPath string, parentLayerPaths []string) (err error) { title := "hcsshim::ImportLayer" - ctx, span := trace.StartSpan(ctx, title) + ctx, span := oc.StartSpan(ctx, title) defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes( @@ -124,7 +125,7 @@ func (r *legacyLayerWriterWrapper) Close() (err error) { // The caller must have taken the SeBackupPrivilege and SeRestorePrivilege privileges // to call this and any methods on the resulting LayerWriter. func NewLayerWriter(ctx context.Context, path string, parentLayerPaths []string) (_ LayerWriter, err error) { - ctx, span := trace.StartSpan(ctx, "hcsshim::NewLayerWriter") + ctx, span := oc.StartSpan(ctx, "hcsshim::NewLayerWriter") defer func() { if err != nil { oc.SetSpanStatus(span, err) @@ -148,7 +149,7 @@ func NewLayerWriter(ctx context.Context, path string, parentLayerPaths []string) }, nil } - importPath, err := ioutil.TempDir("", "hcs") + importPath, err := os.MkdirTemp("", "hcs") if err != nil { return nil, err } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go index 01e67233939b5..4d82977ea132b 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerexists.go @@ -1,3 +1,5 @@ +//go:build windows + package wclayer import ( @@ -12,7 +14,7 @@ import ( // to the system. func LayerExists(ctx context.Context, path string) (_ bool, err error) { title := "hcsshim::LayerExists" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes(trace.StringAttribute("path", path)) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go index 0ce34a30f86a9..d4805f14441c9 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerid.go @@ -1,3 +1,5 @@ +//go:build windows + package wclayer import ( @@ -12,7 +14,7 @@ import ( // LayerID returns the layer ID of a layer on disk. 
func LayerID(ctx context.Context, path string) (_ guid.GUID, err error) { title := "hcsshim::LayerID" - ctx, span := trace.StartSpan(ctx, title) + ctx, span := oc.StartSpan(ctx, title) defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes(trace.StringAttribute("path", path)) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go index 1ec893c6af7a8..d5d2cb137a8d7 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/layerutils.go @@ -1,3 +1,5 @@ +//go:build windows + package wclayer // This file contains utility functions to support storage (graph) related @@ -11,7 +13,9 @@ import ( "github.com/sirupsen/logrus" ) -/* To pass into syscall, we need a struct matching the following: +/* +To pass into syscall, we need a struct matching the following: + enum GraphDriverType { DiffDriver, @@ -34,32 +38,34 @@ var ( stdDriverInfo = driverInfo{1, &utf16EmptyString} ) -/* To pass into syscall, we need a struct matching the following: +/* +To pass into syscall, we need a struct matching the following: + typedef struct _WC_LAYER_DESCRIPTOR { - // - // The ID of the layer - // + // + // The ID of the layer + // - GUID LayerId; + GUID LayerId; - // - // Additional flags - // + // + // Additional flags + // - union { - struct { - ULONG Reserved : 31; - ULONG Dirty : 1; // Created from sandbox as a result of snapshot - }; - ULONG Value; - } Flags; + union { + struct { + ULONG Reserved : 31; + ULONG Dirty : 1; // Created from sandbox as a result of snapshot + }; + ULONG Value; + } Flags; - // - // Path to the layer root directory, null-terminated - // + // + // Path to the layer root directory, null-terminated + // - PCWSTR Path; + PCWSTR Path; } WC_LAYER_DESCRIPTOR, *PWC_LAYER_DESCRIPTOR; */ diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go index b7f3064f26be1..ee8da5df9cd87 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go @@ -1,3 +1,5 @@ +//go:build windows + package wclayer import ( @@ -6,7 +8,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "path/filepath" "strings" @@ -262,7 +263,6 @@ func (r *legacyLayerReader) Next() (path string, size int64, fileInfo *winio.Fil // The creation time and access time get reset for files outside of the Files path. fileInfo.CreationTime = fileInfo.LastWriteTime fileInfo.LastAccessTime = fileInfo.LastWriteTime - } else { // The file attributes are written before the backup stream. 
var attr uint32 @@ -294,6 +294,18 @@ func (r *legacyLayerReader) Next() (path string, size int64, fileInfo *winio.Fil return } +func (r *legacyLayerReader) LinkInfo() (uint32, *winio.FileIDInfo, error) { + fileStandardInfo, err := winio.GetFileStandardInfo(r.currentFile) + if err != nil { + return 0, nil, err + } + fileIDInfo, err := winio.GetFileID(r.currentFile) + if err != nil { + return 0, nil, err + } + return fileStandardInfo.NumberOfLinks, fileIDInfo, nil +} + func (r *legacyLayerReader) Read(b []byte) (int, error) { if r.backupReader == nil { if r.currentFile == nil { @@ -349,7 +361,7 @@ type legacyLayerWriter struct { currentIsDir bool } -// newLegacyLayerWriter returns a LayerWriter that can write the contaler layer +// newLegacyLayerWriter returns a LayerWriter that can write the container layer // transport format to disk. func newLegacyLayerWriter(root string, parentRoots []string, destRoot string) (w *legacyLayerWriter, err error) { w = &legacyLayerWriter{ @@ -376,7 +388,7 @@ func newLegacyLayerWriter(root string, parentRoots []string, destRoot string) (w } w.parentRoots = append(w.parentRoots, f) } - w.bufWriter = bufio.NewWriterSize(ioutil.Discard, 65536) + w.bufWriter = bufio.NewWriterSize(io.Discard, 65536) return } @@ -419,7 +431,7 @@ func (w *legacyLayerWriter) reset() error { if err != nil { return err } - w.bufWriter.Reset(ioutil.Discard) + w.bufWriter.Reset(io.Discard) if w.currentIsDir { r := w.currentFile br := winio.NewBackupStreamReader(r) @@ -695,7 +707,7 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro // The file attributes are written before the stream. err = binary.Write(w.bufWriter, binary.LittleEndian, uint32(fileInfo.FileAttributes)) if err != nil { - w.bufWriter.Reset(ioutil.Discard) + w.bufWriter.Reset(io.Discard) return err } } @@ -730,7 +742,7 @@ func (w *legacyLayerWriter) AddLink(name string, target string) error { return errors.New("invalid hard link in layer") } - // Find to try the target of the link in a previously added file. If that + // Try to find the target of the link in a previously added file. If that // fails, search in parent layers. var selectedRoot *os.File if _, ok := w.addedFiles[target]; ok { diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go index 09950297cef91..c45fa2750c56c 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/nametoguid.go @@ -1,3 +1,5 @@ +//go:build windows + package wclayer import ( @@ -14,7 +16,7 @@ import ( // across all clients. 
func NameToGuid(ctx context.Context, name string) (_ guid.GUID, err error) { title := "hcsshim::NameToGuid" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes(trace.StringAttribute("objectName", name)) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go index 90129faefbb30..b66e071245f5c 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/preparelayer.go @@ -1,3 +1,5 @@ +//go:build windows + package wclayer import ( @@ -19,7 +21,7 @@ var prepareLayerLock sync.Mutex // Disabling the filter must be done via UnprepareLayer. func PrepareLayer(ctx context.Context, path string, parentLayerPaths []string) (err error) { title := "hcsshim::PrepareLayer" - ctx, span := trace.StartSpan(ctx, title) + ctx, span := oc.StartSpan(ctx, title) defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes( diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go index 30bcdff5f5539..7c49cbda45ff9 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/processimage.go @@ -1,3 +1,5 @@ +//go:build windows + package wclayer import ( @@ -12,7 +14,7 @@ import ( // The files should have been extracted to \Files. func ProcessBaseLayer(ctx context.Context, path string) (err error) { title := "hcsshim::ProcessBaseLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes(trace.StringAttribute("path", path)) @@ -28,7 +30,7 @@ func ProcessBaseLayer(ctx context.Context, path string) (err error) { // The files should have been extracted to \Files. func ProcessUtilityVMImage(ctx context.Context, path string) (err error) { title := "hcsshim::ProcessUtilityVMImage" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes(trace.StringAttribute("path", path)) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go index 71b130c525f3c..fe20702c180aa 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/unpreparelayer.go @@ -1,3 +1,5 @@ +//go:build windows + package wclayer import ( @@ -12,7 +14,7 @@ import ( // the given id. 
func UnprepareLayer(ctx context.Context, path string) (err error) { title := "hcsshim::UnprepareLayer" - ctx, span := trace.StartSpan(ctx, title) //nolint:ineffassign,staticcheck + ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck defer span.End() defer func() { oc.SetSpanStatus(span, err) }() span.AddAttributes(trace.StringAttribute("path", path)) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go index 9b1e06d50c5d7..39682b8171916 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/wclayer.go @@ -1,11 +1,10 @@ -// Package wclayer provides bindings to HCS's legacy layer management API and -// provides a higher level interface around these calls for container layer -// management. +//go:build windows + package wclayer import "github.com/Microsoft/go-winio/pkg/guid" -//go:generate go run ../../mksyscall_windows.go -output zsyscall_windows.go wclayer.go +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go wclayer.go //sys activateLayer(info *driverInfo, id string) (hr error) = vmcompute.ActivateLayer? //sys copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CopyLayer? diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go index 67f917f07e615..0cb509c46fc2e 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/zsyscall_windows.go @@ -1,4 +1,6 @@ -// Code generated mksyscall_windows.exe DO NOT EDIT +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. 
package wclayer @@ -19,6 +21,7 @@ const ( var ( errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL ) // errnoErr returns common boxed Errno values, to prevent @@ -26,7 +29,7 @@ var ( func errnoErr(e syscall.Errno) error { switch e { case 0: - return nil + return errERROR_EINVAL case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } @@ -37,33 +40,75 @@ func errnoErr(e syscall.Errno) error { } var ( - modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") - modvirtdisk = windows.NewLazySystemDLL("virtdisk.dll") modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modvirtdisk = windows.NewLazySystemDLL("virtdisk.dll") + modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") + procGetDiskFreeSpaceExW = modkernel32.NewProc("GetDiskFreeSpaceExW") + procAttachVirtualDisk = modvirtdisk.NewProc("AttachVirtualDisk") + procOpenVirtualDisk = modvirtdisk.NewProc("OpenVirtualDisk") procActivateLayer = modvmcompute.NewProc("ActivateLayer") procCopyLayer = modvmcompute.NewProc("CopyLayer") procCreateLayer = modvmcompute.NewProc("CreateLayer") procCreateSandboxLayer = modvmcompute.NewProc("CreateSandboxLayer") - procExpandSandboxSize = modvmcompute.NewProc("ExpandSandboxSize") procDeactivateLayer = modvmcompute.NewProc("DeactivateLayer") procDestroyLayer = modvmcompute.NewProc("DestroyLayer") + procExpandSandboxSize = modvmcompute.NewProc("ExpandSandboxSize") procExportLayer = modvmcompute.NewProc("ExportLayer") - procGetLayerMountPath = modvmcompute.NewProc("GetLayerMountPath") procGetBaseImages = modvmcompute.NewProc("GetBaseImages") + procGetLayerMountPath = modvmcompute.NewProc("GetLayerMountPath") + procGrantVmAccess = modvmcompute.NewProc("GrantVmAccess") procImportLayer = modvmcompute.NewProc("ImportLayer") procLayerExists = modvmcompute.NewProc("LayerExists") procNameToGuid = modvmcompute.NewProc("NameToGuid") procPrepareLayer = modvmcompute.NewProc("PrepareLayer") - procUnprepareLayer = modvmcompute.NewProc("UnprepareLayer") procProcessBaseImage = modvmcompute.NewProc("ProcessBaseImage") procProcessUtilityImage = modvmcompute.NewProc("ProcessUtilityImage") - procGrantVmAccess = modvmcompute.NewProc("GrantVmAccess") - procOpenVirtualDisk = modvirtdisk.NewProc("OpenVirtualDisk") - procAttachVirtualDisk = modvirtdisk.NewProc("AttachVirtualDisk") - procGetDiskFreeSpaceExW = modkernel32.NewProc("GetDiskFreeSpaceExW") + procUnprepareLayer = modvmcompute.NewProc("UnprepareLayer") ) +func getDiskFreeSpaceEx(directoryName string, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(directoryName) + if err != nil { + return + } + return _getDiskFreeSpaceEx(_p0, freeBytesAvailableToCaller, totalNumberOfBytes, totalNumberOfFreeBytes) +} + +func _getDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) { + r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func attachVirtualDisk(handle syscall.Handle, sd uintptr, flags uint32, providerFlags uint32, params uintptr, overlapped uintptr) (err error) { + r1, _, e1 := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(sd), 
uintptr(flags), uintptr(providerFlags), uintptr(params), uintptr(overlapped)) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + +func openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(path) + if err != nil { + return + } + return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, flags, parameters, handle) +} + +func _openVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) { + r1, _, e1 := syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(flags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle))) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + func activateLayer(info *driverInfo, id string) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(id) @@ -74,7 +119,8 @@ func activateLayer(info *driverInfo, id string) (hr error) { } func _activateLayer(info *driverInfo, id *uint16) (hr error) { - if hr = procActivateLayer.Find(); hr != nil { + hr = procActivateLayer.Find() + if hr != nil { return } r0, _, _ := syscall.Syscall(procActivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) @@ -102,13 +148,14 @@ func copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LA } func _copyLayer(info *driverInfo, srcId *uint16, dstId *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + hr = procCopyLayer.Find() + if hr != nil { + return + } var _p2 *WC_LAYER_DESCRIPTOR if len(descriptors) > 0 { _p2 = &descriptors[0] } - if hr = procCopyLayer.Find(); hr != nil { - return - } r0, _, _ := syscall.Syscall6(procCopyLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(srcId)), uintptr(unsafe.Pointer(dstId)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { @@ -134,7 +181,8 @@ func createLayer(info *driverInfo, id string, parent string) (hr error) { } func _createLayer(info *driverInfo, id *uint16, parent *uint16) (hr error) { - if hr = procCreateLayer.Find(); hr != nil { + hr = procCreateLayer.Find() + if hr != nil { return } r0, _, _ := syscall.Syscall(procCreateLayer.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(parent))) @@ -157,13 +205,14 @@ func createSandboxLayer(info *driverInfo, id string, parent uintptr, descriptors } func _createSandboxLayer(info *driverInfo, id *uint16, parent uintptr, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + hr = procCreateSandboxLayer.Find() + if hr != nil { + return + } var _p1 *WC_LAYER_DESCRIPTOR if len(descriptors) > 0 { _p1 = &descriptors[0] } - if hr = procCreateSandboxLayer.Find(); hr != nil { - return - } r0, _, _ := syscall.Syscall6(procCreateSandboxLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(parent), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { @@ -174,20 +223,21 @@ func _createSandboxLayer(info *driverInfo, id *uint16, parent uintptr, descripto return } -func expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) { +func deactivateLayer(info *driverInfo, id string) 
(hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(id) if hr != nil { return } - return _expandSandboxSize(info, _p0, size) + return _deactivateLayer(info, _p0) } -func _expandSandboxSize(info *driverInfo, id *uint16, size uint64) (hr error) { - if hr = procExpandSandboxSize.Find(); hr != nil { +func _deactivateLayer(info *driverInfo, id *uint16) (hr error) { + hr = procDeactivateLayer.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall(procExpandSandboxSize.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(size)) + r0, _, _ := syscall.Syscall(procDeactivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -197,20 +247,21 @@ func _expandSandboxSize(info *driverInfo, id *uint16, size uint64) (hr error) { return } -func deactivateLayer(info *driverInfo, id string) (hr error) { +func destroyLayer(info *driverInfo, id string) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(id) if hr != nil { return } - return _deactivateLayer(info, _p0) + return _destroyLayer(info, _p0) } -func _deactivateLayer(info *driverInfo, id *uint16) (hr error) { - if hr = procDeactivateLayer.Find(); hr != nil { +func _destroyLayer(info *driverInfo, id *uint16) (hr error) { + hr = procDestroyLayer.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall(procDeactivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) + r0, _, _ := syscall.Syscall(procDestroyLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -220,20 +271,21 @@ func _deactivateLayer(info *driverInfo, id *uint16) (hr error) { return } -func destroyLayer(info *driverInfo, id string) (hr error) { +func expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(id) if hr != nil { return } - return _destroyLayer(info, _p0) + return _expandSandboxSize(info, _p0, size) } -func _destroyLayer(info *driverInfo, id *uint16) (hr error) { - if hr = procDestroyLayer.Find(); hr != nil { +func _expandSandboxSize(info *driverInfo, id *uint16, size uint64) (hr error) { + hr = procExpandSandboxSize.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall(procDestroyLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) + r0, _, _ := syscall.Syscall(procExpandSandboxSize.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(size)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -258,14 +310,30 @@ func exportLayer(info *driverInfo, id string, path string, descriptors []WC_LAYE } func _exportLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + hr = procExportLayer.Find() + if hr != nil { + return + } var _p2 *WC_LAYER_DESCRIPTOR if len(descriptors) > 0 { _p2 = &descriptors[0] } - if hr = procExportLayer.Find(); hr != nil { + r0, _, _ := syscall.Syscall6(procExportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func getBaseImages(buffer **uint16) (hr error) { + hr = procGetBaseImages.Find() + if hr != nil { return } - r0, _, _ := 
syscall.Syscall6(procExportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) + r0, _, _ := syscall.Syscall(procGetBaseImages.Addr(), 1, uintptr(unsafe.Pointer(buffer)), 0, 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -285,7 +353,8 @@ func getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uin } func _getLayerMountPath(info *driverInfo, id *uint16, length *uintptr, buffer *uint16) (hr error) { - if hr = procGetLayerMountPath.Find(); hr != nil { + hr = procGetLayerMountPath.Find() + if hr != nil { return } r0, _, _ := syscall.Syscall6(procGetLayerMountPath.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(length)), uintptr(unsafe.Pointer(buffer)), 0, 0) @@ -298,11 +367,26 @@ func _getLayerMountPath(info *driverInfo, id *uint16, length *uintptr, buffer *u return } -func getBaseImages(buffer **uint16) (hr error) { - if hr = procGetBaseImages.Find(); hr != nil { +func grantVmAccess(vmid string, filepath string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(vmid) + if hr != nil { return } - r0, _, _ := syscall.Syscall(procGetBaseImages.Addr(), 1, uintptr(unsafe.Pointer(buffer)), 0, 0) + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(filepath) + if hr != nil { + return + } + return _grantVmAccess(_p0, _p1) +} + +func _grantVmAccess(vmid *uint16, filepath *uint16) (hr error) { + hr = procGrantVmAccess.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procGrantVmAccess.Addr(), 2, uintptr(unsafe.Pointer(vmid)), uintptr(unsafe.Pointer(filepath)), 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -327,13 +411,14 @@ func importLayer(info *driverInfo, id string, path string, descriptors []WC_LAYE } func _importLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + hr = procImportLayer.Find() + if hr != nil { + return + } var _p2 *WC_LAYER_DESCRIPTOR if len(descriptors) > 0 { _p2 = &descriptors[0] } - if hr = procImportLayer.Find(); hr != nil { - return - } r0, _, _ := syscall.Syscall6(procImportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { @@ -354,7 +439,8 @@ func layerExists(info *driverInfo, id string, exists *uint32) (hr error) { } func _layerExists(info *driverInfo, id *uint16, exists *uint32) (hr error) { - if hr = procLayerExists.Find(); hr != nil { + hr = procLayerExists.Find() + if hr != nil { return } r0, _, _ := syscall.Syscall(procLayerExists.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(exists))) @@ -377,7 +463,8 @@ func nameToGuid(name string, guid *_guid) (hr error) { } func _nameToGuid(name *uint16, guid *_guid) (hr error) { - if hr = procNameToGuid.Find(); hr != nil { + hr = procNameToGuid.Find() + if hr != nil { return } r0, _, _ := syscall.Syscall(procNameToGuid.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(guid)), 0) @@ -400,13 +487,14 @@ func prepareLayer(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR } func _prepareLayer(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { + hr = procPrepareLayer.Find() + if hr != nil { + return + } var _p1 *WC_LAYER_DESCRIPTOR if len(descriptors) > 0 { 
_p1 = &descriptors[0] } - if hr = procPrepareLayer.Find(); hr != nil { - return - } r0, _, _ := syscall.Syscall6(procPrepareLayer.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0, 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { @@ -417,29 +505,6 @@ func _prepareLayer(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPT return } -func unprepareLayer(info *driverInfo, id string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _unprepareLayer(info, _p0) -} - -func _unprepareLayer(info *driverInfo, id *uint16) (hr error) { - if hr = procUnprepareLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procUnprepareLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) - } - return -} - func processBaseImage(path string) (hr error) { var _p0 *uint16 _p0, hr = syscall.UTF16PtrFromString(path) @@ -450,7 +515,8 @@ func processBaseImage(path string) (hr error) { } func _processBaseImage(path *uint16) (hr error) { - if hr = procProcessBaseImage.Find(); hr != nil { + hr = procProcessBaseImage.Find() + if hr != nil { return } r0, _, _ := syscall.Syscall(procProcessBaseImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) @@ -473,7 +539,8 @@ func processUtilityImage(path string) (hr error) { } func _processUtilityImage(path *uint16) (hr error) { - if hr = procProcessUtilityImage.Find(); hr != nil { + hr = procProcessUtilityImage.Find() + if hr != nil { return } r0, _, _ := syscall.Syscall(procProcessUtilityImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) @@ -486,25 +553,21 @@ func _processUtilityImage(path *uint16) (hr error) { return } -func grantVmAccess(vmid string, filepath string) (hr error) { +func unprepareLayer(info *driverInfo, id string) (hr error) { var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(vmid) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(filepath) + _p0, hr = syscall.UTF16PtrFromString(id) if hr != nil { return } - return _grantVmAccess(_p0, _p1) + return _unprepareLayer(info, _p0) } -func _grantVmAccess(vmid *uint16, filepath *uint16) (hr error) { - if hr = procGrantVmAccess.Find(); hr != nil { +func _unprepareLayer(info *driverInfo, id *uint16) (hr error) { + hr = procUnprepareLayer.Find() + if hr != nil { return } - r0, _, _ := syscall.Syscall(procGrantVmAccess.Addr(), 2, uintptr(unsafe.Pointer(vmid)), uintptr(unsafe.Pointer(filepath)), 0) + r0, _, _ := syscall.Syscall(procUnprepareLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -513,57 +576,3 @@ func _grantVmAccess(vmid *uint16, filepath *uint16) (hr error) { } return } - -func openVirtualDisk(virtualStorageType *virtualStorageType, path string, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(path) - if err != nil { - return - } - return _openVirtualDisk(virtualStorageType, _p0, virtualDiskAccessMask, flags, parameters, handle) -} - -func _openVirtualDisk(virtualStorageType *virtualStorageType, path *uint16, virtualDiskAccessMask uint32, flags uint32, parameters *openVirtualDiskParameters, handle *syscall.Handle) (err error) { - r1, _, e1 := 
syscall.Syscall6(procOpenVirtualDisk.Addr(), 6, uintptr(unsafe.Pointer(virtualStorageType)), uintptr(unsafe.Pointer(path)), uintptr(virtualDiskAccessMask), uintptr(flags), uintptr(unsafe.Pointer(parameters)), uintptr(unsafe.Pointer(handle))) - if r1 != 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func attachVirtualDisk(handle syscall.Handle, sd uintptr, flags uint32, providerFlags uint32, params uintptr, overlapped uintptr) (err error) { - r1, _, e1 := syscall.Syscall6(procAttachVirtualDisk.Addr(), 6, uintptr(handle), uintptr(sd), uintptr(flags), uintptr(providerFlags), uintptr(params), uintptr(overlapped)) - if r1 != 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getDiskFreeSpaceEx(directoryName string, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(directoryName) - if err != nil { - return - } - return _getDiskFreeSpaceEx(_p0, freeBytesAvailableToCaller, totalNumberOfBytes, totalNumberOfFreeBytes) -} - -func _getDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *int64, totalNumberOfBytes *int64, totalNumberOfFreeBytes *int64) (err error) { - r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/bindflt.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/bindflt.go new file mode 100644 index 0000000000000..559d443256185 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/bindflt.go @@ -0,0 +1,19 @@ +package winapi + +const ( + BINDFLT_FLAG_READ_ONLY_MAPPING uint32 = 0x00000001 + BINDFLT_FLAG_MERGED_BIND_MAPPING uint32 = 0x00000002 + BINDFLT_FLAG_USE_CURRENT_SILO_MAPPING uint32 = 0x00000004 +) + +// HRESULT +// BfSetupFilter( +// _In_opt_ HANDLE JobHandle, +// _In_ ULONG Flags, +// _In_ LPCWSTR VirtualizationRootPath, +// _In_ LPCWSTR VirtualizationTargetPath, +// _In_reads_opt_( VirtualizationExceptionPathCount ) LPCWSTR* VirtualizationExceptionPaths, +// _In_opt_ ULONG VirtualizationExceptionPathCount +// ); +// +//sys BfSetupFilter(jobHandle windows.Handle, flags uint32, virtRootPath *uint16, virtTargetPath *uint16, virtExceptions **uint16, virtExceptionPathCount uint32) (hr error) = bindfltapi.BfSetupFilter? 
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go index def9525417e9e..4547cdd8e81d5 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/console.go @@ -1,3 +1,5 @@ +//go:build windows + package winapi import ( diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go index df28ea24216d1..7875466caddf5 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/devices.go @@ -1,3 +1,5 @@ +//go:build windows + package winapi import "github.com/Microsoft/go-winio/pkg/guid" diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/doc.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/doc.go new file mode 100644 index 0000000000000..9acc0bfc17d4e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/doc.go @@ -0,0 +1,3 @@ +// Package winapi contains various low-level bindings to Windows APIs. It can +// be thought of as an extension to golang.org/x/sys/windows. +package winapi diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/elevation.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/elevation.go new file mode 100644 index 0000000000000..40cbf8712f39c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/elevation.go @@ -0,0 +1,11 @@ +//go:build windows + +package winapi + +import ( + "golang.org/x/sys/windows" +) + +func IsElevated() bool { + return windows.GetCurrentProcessToken().IsElevated() +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go index 4e80ef68c92c5..49ce924cbe7ed 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/errors.go @@ -1,3 +1,5 @@ +//go:build windows + package winapi import "syscall" diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go index 7ce52afd5e18a..3dcb3faa0b698 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/filesystem.go @@ -1,5 +1,8 @@ +//go:build windows + package winapi +//sys CopyFileW(existingFileName *uint16, newFileName *uint16, failIfExists int32) (err error) = kernel32.CopyFileW //sys NtCreateFile(handle *uintptr, accessMask uint32, oa *ObjectAttributes, iosb *IOStatusBlock, allocationSize *uint64, fileAttributes uint32, shareAccess uint32, createDisposition uint32, createOptions uint32, eaBuffer *byte, eaLength uint32) (status uint32) = ntdll.NtCreateFile //sys NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) = ntdll.NtSetInformationFile @@ -34,34 +37,35 @@ const ( // Select entries from FILE_INFO_BY_HANDLE_CLASS. 
// // C declaration: -// typedef enum _FILE_INFO_BY_HANDLE_CLASS { -// FileBasicInfo, -// FileStandardInfo, -// FileNameInfo, -// FileRenameInfo, -// FileDispositionInfo, -// FileAllocationInfo, -// FileEndOfFileInfo, -// FileStreamInfo, -// FileCompressionInfo, -// FileAttributeTagInfo, -// FileIdBothDirectoryInfo, -// FileIdBothDirectoryRestartInfo, -// FileIoPriorityHintInfo, -// FileRemoteProtocolInfo, -// FileFullDirectoryInfo, -// FileFullDirectoryRestartInfo, -// FileStorageInfo, -// FileAlignmentInfo, -// FileIdInfo, -// FileIdExtdDirectoryInfo, -// FileIdExtdDirectoryRestartInfo, -// FileDispositionInfoEx, -// FileRenameInfoEx, -// FileCaseSensitiveInfo, -// FileNormalizedNameInfo, -// MaximumFileInfoByHandleClass -// } FILE_INFO_BY_HANDLE_CLASS, *PFILE_INFO_BY_HANDLE_CLASS; +// +// typedef enum _FILE_INFO_BY_HANDLE_CLASS { +// FileBasicInfo, +// FileStandardInfo, +// FileNameInfo, +// FileRenameInfo, +// FileDispositionInfo, +// FileAllocationInfo, +// FileEndOfFileInfo, +// FileStreamInfo, +// FileCompressionInfo, +// FileAttributeTagInfo, +// FileIdBothDirectoryInfo, +// FileIdBothDirectoryRestartInfo, +// FileIoPriorityHintInfo, +// FileRemoteProtocolInfo, +// FileFullDirectoryInfo, +// FileFullDirectoryRestartInfo, +// FileStorageInfo, +// FileAlignmentInfo, +// FileIdInfo, +// FileIdExtdDirectoryInfo, +// FileIdExtdDirectoryRestartInfo, +// FileDispositionInfoEx, +// FileRenameInfoEx, +// FileCaseSensitiveInfo, +// FileNormalizedNameInfo, +// MaximumFileInfoByHandleClass +// } FILE_INFO_BY_HANDLE_CLASS, *PFILE_INFO_BY_HANDLE_CLASS; // // Documentation: https://docs.microsoft.com/en-us/windows/win32/api/minwinbase/ne-minwinbase-file_info_by_handle_class const ( @@ -98,10 +102,11 @@ type FileLinkInformation struct { } // C declaration: -// typedef struct _FILE_ID_INFO { -// ULONGLONG VolumeSerialNumber; -// FILE_ID_128 FileId; -// } FILE_ID_INFO, *PFILE_ID_INFO; +// +// typedef struct _FILE_ID_INFO { +// ULONGLONG VolumeSerialNumber; +// FILE_ID_128 FileId; +// } FILE_ID_INFO, *PFILE_ID_INFO; // // Documentation: https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_id_info type FILE_ID_INFO struct { diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go index 7eb13f8f0a83f..b0deb5c72d9fd 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go @@ -1,3 +1,5 @@ +//go:build windows + package winapi import ( @@ -55,6 +57,8 @@ const ( JobObjectLimitViolationInformation uint32 = 13 JobObjectMemoryUsageInformation uint32 = 28 JobObjectNotificationLimitInformation2 uint32 = 33 + JobObjectCreateSilo uint32 = 35 + JobObjectSiloBasicInformation uint32 = 36 JobObjectIoAttribution uint32 = 42 ) @@ -111,29 +115,27 @@ type JOBOBJECT_BASIC_ACCOUNTING_INFORMATION struct { TotalTerminateProcesses uint32 } -//https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_and_io_accounting_information +// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_and_io_accounting_information type JOBOBJECT_BASIC_AND_IO_ACCOUNTING_INFORMATION struct { BasicInfo JOBOBJECT_BASIC_ACCOUNTING_INFORMATION IoInfo windows.IO_COUNTERS } -// typedef struct _JOBOBJECT_MEMORY_USAGE_INFORMATION { -// ULONG64 JobMemory; -// ULONG64 PeakJobMemoryUsed; -// } JOBOBJECT_MEMORY_USAGE_INFORMATION, *PJOBOBJECT_MEMORY_USAGE_INFORMATION; -// +// typedef struct 
_JOBOBJECT_MEMORY_USAGE_INFORMATION { +// ULONG64 JobMemory; +// ULONG64 PeakJobMemoryUsed; +// } JOBOBJECT_MEMORY_USAGE_INFORMATION, *PJOBOBJECT_MEMORY_USAGE_INFORMATION; type JOBOBJECT_MEMORY_USAGE_INFORMATION struct { JobMemory uint64 PeakJobMemoryUsed uint64 } -// typedef struct _JOBOBJECT_IO_ATTRIBUTION_STATS { -// ULONG_PTR IoCount; -// ULONGLONG TotalNonOverlappedQueueTime; -// ULONGLONG TotalNonOverlappedServiceTime; -// ULONGLONG TotalSize; -// } JOBOBJECT_IO_ATTRIBUTION_STATS, *PJOBOBJECT_IO_ATTRIBUTION_STATS; -// +// typedef struct _JOBOBJECT_IO_ATTRIBUTION_STATS { +// ULONG_PTR IoCount; +// ULONGLONG TotalNonOverlappedQueueTime; +// ULONGLONG TotalNonOverlappedServiceTime; +// ULONGLONG TotalSize; +// } JOBOBJECT_IO_ATTRIBUTION_STATS, *PJOBOBJECT_IO_ATTRIBUTION_STATS; type JOBOBJECT_IO_ATTRIBUTION_STATS struct { IoCount uintptr TotalNonOverlappedQueueTime uint64 @@ -141,12 +143,11 @@ type JOBOBJECT_IO_ATTRIBUTION_STATS struct { TotalSize uint64 } -// typedef struct _JOBOBJECT_IO_ATTRIBUTION_INFORMATION { -// ULONG ControlFlags; -// JOBOBJECT_IO_ATTRIBUTION_STATS ReadStats; -// JOBOBJECT_IO_ATTRIBUTION_STATS WriteStats; -// } JOBOBJECT_IO_ATTRIBUTION_INFORMATION, *PJOBOBJECT_IO_ATTRIBUTION_INFORMATION; -// +// typedef struct _JOBOBJECT_IO_ATTRIBUTION_INFORMATION { +// ULONG ControlFlags; +// JOBOBJECT_IO_ATTRIBUTION_STATS ReadStats; +// JOBOBJECT_IO_ATTRIBUTION_STATS WriteStats; +// } JOBOBJECT_IO_ATTRIBUTION_INFORMATION, *PJOBOBJECT_IO_ATTRIBUTION_INFORMATION; type JOBOBJECT_IO_ATTRIBUTION_INFORMATION struct { ControlFlags uint32 ReadStats JOBOBJECT_IO_ATTRIBUTION_STATS @@ -183,7 +184,7 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct { // LPCWSTR lpName // ); // -//sys OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) = kernel32.OpenJobObjectW +//sys OpenJobObject(desiredAccess uint32, inheritHandle int32, lpName *uint16) (handle windows.Handle, err error) = kernel32.OpenJobObjectW // DWORD SetIoRateControlInformationJobObject( // HANDLE hJob, @@ -198,6 +199,7 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct { // JOBOBJECT_IO_RATE_CONTROL_INFORMATION **InfoBlocks, // ULONG *InfoBlockCount // ); +// //sys QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName *uint16, ioRateControlInfo **JOBOBJECT_IO_RATE_CONTROL_INFORMATION, infoBlockCount *uint32) (ret uint32, err error) = kernel32.QueryIoRateControlInformationJobObject // NTSTATUS @@ -206,6 +208,7 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct { // _In_ ACCESS_MASK DesiredAccess, // _In_ POBJECT_ATTRIBUTES ObjectAttributes // ); +// //sys NtOpenJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) = ntdll.NtOpenJobObject // NTSTATUS @@ -215,4 +218,5 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct { // _In_ ACCESS_MASK DesiredAccess, // _In_opt_ POBJECT_ATTRIBUTES ObjectAttributes // ); +// //sys NtCreateJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) = ntdll.NtCreateJobObject diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/ofreg.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/ofreg.go new file mode 100644 index 0000000000000..d8f7afe8a40ee --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/ofreg.go @@ -0,0 +1,5 @@ +package winapi + +//sys ORCreateHive(key *syscall.Handle) (regerrno error) = offreg.ORCreateHive +//sys ORSaveHive(key syscall.Handle, file string, OsMajorVersion 
uint32, OsMinorVersion uint32) (regerrno error) = offreg.ORSaveHive +//sys ORCloseHive(key syscall.Handle) (regerrno error) = offreg.ORCloseHive diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go index 908920e8722a4..c6a149b552375 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/path.go @@ -8,4 +8,5 @@ package winapi // LPWSTR lpBuffer, // LPWSTR *lpFilePart // ); +// //sys SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath *uint16) (size uint32, err error) = kernel32.SearchPathW diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go index 222529f433a52..f4ae94cfa5849 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go @@ -20,22 +20,20 @@ const ProcessVmCounters = 3 // //sys NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQueryInformationProcess -// typedef struct _VM_COUNTERS_EX -// { -// SIZE_T PeakVirtualSize; -// SIZE_T VirtualSize; -// ULONG PageFaultCount; -// SIZE_T PeakWorkingSetSize; -// SIZE_T WorkingSetSize; -// SIZE_T QuotaPeakPagedPoolUsage; -// SIZE_T QuotaPagedPoolUsage; -// SIZE_T QuotaPeakNonPagedPoolUsage; -// SIZE_T QuotaNonPagedPoolUsage; -// SIZE_T PagefileUsage; -// SIZE_T PeakPagefileUsage; -// SIZE_T PrivateUsage; -// } VM_COUNTERS_EX, *PVM_COUNTERS_EX; -// +// typedef struct _VM_COUNTERS_EX { +// SIZE_T PeakVirtualSize; +// SIZE_T VirtualSize; +// ULONG PageFaultCount; +// SIZE_T PeakWorkingSetSize; +// SIZE_T WorkingSetSize; +// SIZE_T QuotaPeakPagedPoolUsage; +// SIZE_T QuotaPagedPoolUsage; +// SIZE_T QuotaPeakNonPagedPoolUsage; +// SIZE_T QuotaNonPagedPoolUsage; +// SIZE_T PagefileUsage; +// SIZE_T PeakPagefileUsage; +// SIZE_T PrivateUsage; +// } VM_COUNTERS_EX, *PVM_COUNTERS_EX; type VM_COUNTERS_EX struct { PeakVirtualSize uintptr VirtualSize uintptr @@ -51,13 +49,11 @@ type VM_COUNTERS_EX struct { PrivateUsage uintptr } -// typedef struct _VM_COUNTERS_EX2 -// { -// VM_COUNTERS_EX CountersEx; -// SIZE_T PrivateWorkingSetSize; -// SIZE_T SharedCommitUsage; -// } VM_COUNTERS_EX2, *PVM_COUNTERS_EX2; -// +// typedef struct _VM_COUNTERS_EX2 { +// VM_COUNTERS_EX CountersEx; +// SIZE_T PrivateWorkingSetSize; +// SIZE_T SharedCommitUsage; +// } VM_COUNTERS_EX2, *PVM_COUNTERS_EX2; type VM_COUNTERS_EX2 struct { CountersEx VM_COUNTERS_EX PrivateWorkingSetSize uintptr diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go index 78fe01a4b412a..cb494aaa65f14 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/system.go @@ -1,3 +1,5 @@ +//go:build windows + package winapi import "golang.org/x/sys/windows" diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go index 4724713e3e41c..f23141a836a1c 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/thread.go @@ -9,4 +9,5 @@ package winapi // DWORD dwCreationFlags, // 
LPDWORD lpThreadId // ); +// //sys CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, stackSize uint32, startAddr uintptr, parameter uintptr, creationFlags uint32, threadID *uint32) (handle windows.Handle, err error) = kernel32.CreateRemoteThread diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/user.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/user.go new file mode 100644 index 0000000000000..84d4cc294d8e1 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/user.go @@ -0,0 +1,194 @@ +//go:build windows + +package winapi + +import ( + "syscall" + + "golang.org/x/sys/windows" +) + +const UserNameCharLimit = 20 + +const ( + USER_PRIV_GUEST uint32 = iota + USER_PRIV_USER + USER_PRIV_ADMIN +) + +const ( + UF_NORMAL_ACCOUNT = 0x00200 + UF_DONT_EXPIRE_PASSWD = 0x10000 +) + +const NERR_UserNotFound = syscall.Errno(0x8AD) + +// typedef struct _LOCALGROUP_MEMBERS_INFO_0 { +// PSID lgrmi0_sid; +// } LOCALGROUP_MEMBERS_INFO_0, *PLOCALGROUP_MEMBERS_INFO_0, *LPLOCALGROUP_MEMBERS_INFO_0; +type LocalGroupMembersInfo0 struct { + Sid *windows.SID +} + +// typedef struct _LOCALGROUP_INFO_1 { +// LPWSTR lgrpi1_name; +// LPWSTR lgrpi1_comment; +// } LOCALGROUP_INFO_1, *PLOCALGROUP_INFO_1, *LPLOCALGROUP_INFO_1; +type LocalGroupInfo1 struct { + Name *uint16 + Comment *uint16 +} + +// typedef struct _USER_INFO_1 { +// LPWSTR usri1_name; +// LPWSTR usri1_password; +// DWORD usri1_password_age; +// DWORD usri1_priv; +// LPWSTR usri1_home_dir; +// LPWSTR usri1_comment; +// DWORD usri1_flags; +// LPWSTR usri1_script_path; +// } USER_INFO_1, *PUSER_INFO_1, *LPUSER_INFO_1; +type UserInfo1 struct { + Name *uint16 + Password *uint16 + PasswordAge uint32 + Priv uint32 + HomeDir *uint16 + Comment *uint16 + Flags uint32 + ScriptPath *uint16 +} + +// NET_API_STATUS NET_API_FUNCTION NetLocalGroupGetInfo( +// [in] LPCWSTR servername, +// [in] LPCWSTR groupname, +// [in] DWORD level, +// [out] LPBYTE *bufptr +// ); +// +//sys netLocalGroupGetInfo(serverName *uint16, groupName *uint16, level uint32, bufptr **byte) (status error) = netapi32.NetLocalGroupGetInfo + +// NetLocalGroupGetInfo is a slightly go friendlier wrapper around the NetLocalGroupGetInfo function. Instead of taking in *uint16's, it takes in +// go strings and does the conversion internally. +func NetLocalGroupGetInfo(serverName, groupName string, level uint32, bufPtr **byte) (err error) { + var ( + serverNameUTF16 *uint16 + groupNameUTF16 *uint16 + ) + if serverName != "" { + serverNameUTF16, err = windows.UTF16PtrFromString(serverName) + if err != nil { + return err + } + } + if groupName != "" { + groupNameUTF16, err = windows.UTF16PtrFromString(groupName) + if err != nil { + return err + } + } + return netLocalGroupGetInfo( + serverNameUTF16, + groupNameUTF16, + level, + bufPtr, + ) +} + +// NET_API_STATUS NET_API_FUNCTION NetUserAdd( +// [in] LPCWSTR servername, +// [in] DWORD level, +// [in] LPBYTE buf, +// [out] LPDWORD parm_err +// ); +// +//sys netUserAdd(serverName *uint16, level uint32, buf *byte, parm_err *uint32) (status error) = netapi32.NetUserAdd + +// NetUserAdd is a slightly go friendlier wrapper around the NetUserAdd function. Instead of taking in *uint16's, it takes in +// go strings and does the conversion internally. 
+func NetUserAdd(serverName string, level uint32, buf *byte, parm_err *uint32) (err error) { + var serverNameUTF16 *uint16 + if serverName != "" { + serverNameUTF16, err = windows.UTF16PtrFromString(serverName) + if err != nil { + return err + } + } + return netUserAdd( + serverNameUTF16, + level, + buf, + parm_err, + ) +} + +// NET_API_STATUS NET_API_FUNCTION NetUserDel( +// [in] LPCWSTR servername, +// [in] LPCWSTR username +// ); +// +//sys netUserDel(serverName *uint16, username *uint16) (status error) = netapi32.NetUserDel + +// NetUserDel is a slightly go friendlier wrapper around the NetUserDel function. Instead of taking in *uint16's, it takes in +// go strings and does the conversion internally. +func NetUserDel(serverName, userName string) (err error) { + var ( + serverNameUTF16 *uint16 + userNameUTF16 *uint16 + ) + if serverName != "" { + serverNameUTF16, err = windows.UTF16PtrFromString(serverName) + if err != nil { + return err + } + } + if userName != "" { + userNameUTF16, err = windows.UTF16PtrFromString(userName) + if err != nil { + return err + } + } + return netUserDel( + serverNameUTF16, + userNameUTF16, + ) +} + +// NET_API_STATUS NET_API_FUNCTION NetLocalGroupAddMembers( +// [in] LPCWSTR servername, +// [in] LPCWSTR groupname, +// [in] DWORD level, +// [in] LPBYTE buf, +// [in] DWORD totalentries +// ); +// +//sys netLocalGroupAddMembers(serverName *uint16, groupName *uint16, level uint32, buf *byte, totalEntries uint32) (status error) = netapi32.NetLocalGroupAddMembers + +// NetLocalGroupAddMembers is a slightly go friendlier wrapper around the NetLocalGroupAddMembers function. Instead of taking in *uint16's, it takes in +// go strings and does the conversion internally. +func NetLocalGroupAddMembers(serverName, groupName string, level uint32, buf *byte, totalEntries uint32) (err error) { + var ( + serverNameUTF16 *uint16 + groupNameUTF16 *uint16 + ) + if serverName != "" { + serverNameUTF16, err = windows.UTF16PtrFromString(serverName) + if err != nil { + return err + } + } + if groupName != "" { + groupNameUTF16, err = windows.UTF16PtrFromString(groupName) + if err != nil { + return err + } + } + return netLocalGroupAddMembers( + serverNameUTF16, + groupNameUTF16, + level, + buf, + totalEntries, + ) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go index 859b753c246d7..a2da570707f8c 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go @@ -1,3 +1,5 @@ +//go:build windows + package winapi import ( @@ -32,7 +34,7 @@ type UnicodeString struct { // denotes the maximum number of wide chars a path can have. const NTSTRSAFE_UNICODE_STRING_MAX_CCH = 32767 -//String converts a UnicodeString to a golang string +// String converts a UnicodeString to a golang string func (uni UnicodeString) String() string { // UnicodeString is not guaranteed to be null terminated, therefore // use the UnicodeString's Length field diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go index d2cc9d9fba6d9..6a90e3a69ac9c 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go @@ -1,5 +1,3 @@ -// Package winapi contains various low-level bindings to Windows APIs. It can -// be thought of as an extension to golang.org/x/sys/windows. 
package winapi -//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go user.go console.go system.go net.go path.go thread.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go +//go:generate go run github.com/Microsoft/go-winio/tools/mkwinsyscall -output zsyscall_windows.go ./*.go diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go index 1f16cf0b8e159..c607245eb34d6 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go @@ -1,4 +1,6 @@ -// Code generated mksyscall_windows.exe DO NOT EDIT +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. package winapi @@ -19,6 +21,7 @@ const ( var ( errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL ) // errnoErr returns common boxed Errno values, to prevent @@ -26,7 +29,7 @@ var ( func errnoErr(e syscall.Errno) error { switch e { case 0: - return nil + return errERROR_EINVAL case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } @@ -37,44 +40,68 @@ func errnoErr(e syscall.Errno) error { } var ( - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - modntdll = windows.NewLazySystemDLL("ntdll.dll") - modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") - modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") - modcfgmgr32 = windows.NewLazySystemDLL("cfgmgr32.dll") + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modbindfltapi = windows.NewLazySystemDLL("bindfltapi.dll") + modcfgmgr32 = windows.NewLazySystemDLL("cfgmgr32.dll") + modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modnetapi32 = windows.NewLazySystemDLL("netapi32.dll") + modntdll = windows.NewLazySystemDLL("ntdll.dll") + modoffreg = windows.NewLazySystemDLL("offreg.dll") - procCreatePseudoConsole = modkernel32.NewProc("CreatePseudoConsole") - procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") - procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") - procNtQuerySystemInformation = modntdll.NewProc("NtQuerySystemInformation") + procLogonUserW = modadvapi32.NewProc("LogonUserW") + procBfSetupFilter = modbindfltapi.NewProc("BfSetupFilter") + procCM_Get_DevNode_PropertyW = modcfgmgr32.NewProc("CM_Get_DevNode_PropertyW") + procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA") + procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA") + procCM_Locate_DevNodeW = modcfgmgr32.NewProc("CM_Locate_DevNodeW") procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId") - procSearchPathW = modkernel32.NewProc("SearchPathW") + procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") + procCopyFileW = modkernel32.NewProc("CopyFileW") + procCreatePseudoConsole = modkernel32.NewProc("CreatePseudoConsole") procCreateRemoteThread = modkernel32.NewProc("CreateRemoteThread") + procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") procIsProcessInJob = modkernel32.NewProc("IsProcessInJob") - procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") - procOpenJobObjectW = modkernel32.NewProc("OpenJobObjectW") - procSetIoRateControlInformationJobObject = 
modkernel32.NewProc("SetIoRateControlInformationJobObject") - procQueryIoRateControlInformationJobObject = modkernel32.NewProc("QueryIoRateControlInformationJobObject") - procNtOpenJobObject = modntdll.NewProc("NtOpenJobObject") - procNtCreateJobObject = modntdll.NewProc("NtCreateJobObject") - procLogonUserW = modadvapi32.NewProc("LogonUserW") procLocalAlloc = modkernel32.NewProc("LocalAlloc") procLocalFree = modkernel32.NewProc("LocalFree") - procNtQueryInformationProcess = modntdll.NewProc("NtQueryInformationProcess") - procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") - procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA") - procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA") - procCM_Locate_DevNodeW = modcfgmgr32.NewProc("CM_Locate_DevNodeW") - procCM_Get_DevNode_PropertyW = modcfgmgr32.NewProc("CM_Get_DevNode_PropertyW") + procOpenJobObjectW = modkernel32.NewProc("OpenJobObjectW") + procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") + procQueryIoRateControlInformationJobObject = modkernel32.NewProc("QueryIoRateControlInformationJobObject") + procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") + procSearchPathW = modkernel32.NewProc("SearchPathW") + procSetIoRateControlInformationJobObject = modkernel32.NewProc("SetIoRateControlInformationJobObject") + procNetLocalGroupAddMembers = modnetapi32.NewProc("NetLocalGroupAddMembers") + procNetLocalGroupGetInfo = modnetapi32.NewProc("NetLocalGroupGetInfo") + procNetUserAdd = modnetapi32.NewProc("NetUserAdd") + procNetUserDel = modnetapi32.NewProc("NetUserDel") procNtCreateFile = modntdll.NewProc("NtCreateFile") - procNtSetInformationFile = modntdll.NewProc("NtSetInformationFile") + procNtCreateJobObject = modntdll.NewProc("NtCreateJobObject") procNtOpenDirectoryObject = modntdll.NewProc("NtOpenDirectoryObject") + procNtOpenJobObject = modntdll.NewProc("NtOpenJobObject") procNtQueryDirectoryObject = modntdll.NewProc("NtQueryDirectoryObject") + procNtQueryInformationProcess = modntdll.NewProc("NtQueryInformationProcess") + procNtQuerySystemInformation = modntdll.NewProc("NtQuerySystemInformation") + procNtSetInformationFile = modntdll.NewProc("NtSetInformationFile") procRtlNtStatusToDosError = modntdll.NewProc("RtlNtStatusToDosError") + procORCloseHive = modoffreg.NewProc("ORCloseHive") + procORCreateHive = modoffreg.NewProc("ORCreateHive") + procORSaveHive = modoffreg.NewProc("ORSaveHive") ) -func createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) { - r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(hInput), uintptr(hOutput), uintptr(dwFlags), uintptr(unsafe.Pointer(hpcon)), 0) +func LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) { + r1, _, e1 := syscall.Syscall6(procLogonUserW.Addr(), 6, uintptr(unsafe.Pointer(username)), uintptr(unsafe.Pointer(domain)), uintptr(unsafe.Pointer(password)), uintptr(logonType), uintptr(logonProvider), uintptr(unsafe.Pointer(token))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func BfSetupFilter(jobHandle windows.Handle, flags uint32, virtRootPath *uint16, virtTargetPath *uint16, virtExceptions **uint16, virtExceptionPathCount uint32) (hr error) { + hr = procBfSetupFilter.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procBfSetupFilter.Addr(), 6, 
uintptr(jobHandle), uintptr(flags), uintptr(unsafe.Pointer(virtRootPath)), uintptr(unsafe.Pointer(virtTargetPath)), uintptr(unsafe.Pointer(virtExceptions)), uintptr(virtExceptionPathCount)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -84,13 +111,8 @@ func createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Han return } -func ClosePseudoConsole(hpc windows.Handle) { - syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(hpc), 0, 0) - return -} - -func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) { - r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(hPc), uintptr(size), 0) +func CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType *uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) { + r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_PropertyW.Addr(), 6, uintptr(dnDevInst), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(unsafe.Pointer(propertyBufferSize)), uintptr(uFlags)) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -100,135 +122,99 @@ func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) { return } -func NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) { - r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) - status = uint32(r0) - return -} - -func SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err error) { - r0, _, _ := syscall.Syscall(procSetJobCompartmentId.Addr(), 2, uintptr(handle), uintptr(compartmentId), 0) - if r0 != 0 { - win32Err = syscall.Errno(r0) +func CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error) { + r0, _, _ := syscall.Syscall6(procCM_Get_Device_ID_ListA.Addr(), 4, uintptr(unsafe.Pointer(pszFilter)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(uFlags), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) } return } -func SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath *uint16) (size uint32, err error) { - r0, _, e1 := syscall.Syscall6(procSearchPathW.Addr(), 6, uintptr(unsafe.Pointer(lpPath)), uintptr(unsafe.Pointer(lpFileName)), uintptr(unsafe.Pointer(lpExtension)), uintptr(nBufferLength), uintptr(unsafe.Pointer(lpBuffer)), uintptr(unsafe.Pointer(lpFilePath))) - size = uint32(r0) - if size == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL +func CMGetDeviceIDListSize(pulLen *uint32, pszFilter *byte, uFlags uint32) (hr error) { + r0, _, _ := syscall.Syscall(procCM_Get_Device_ID_List_SizeA.Addr(), 3, uintptr(unsafe.Pointer(pulLen)), uintptr(unsafe.Pointer(pszFilter)), uintptr(uFlags)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff } + hr = syscall.Errno(r0) } return } -func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, stackSize uint32, startAddr uintptr, parameter uintptr, creationFlags uint32, threadID *uint32) (handle windows.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateRemoteThread.Addr(), 7, uintptr(process), uintptr(unsafe.Pointer(sa)), uintptr(stackSize), uintptr(startAddr), 
uintptr(parameter), uintptr(creationFlags), uintptr(unsafe.Pointer(threadID)), 0, 0) - handle = windows.Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(pDeviceID) + if hr != nil { + return } - return + return _CMLocateDevNode(pdnDevInst, _p0, uFlags) } -func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) { - r1, _, e1 := syscall.Syscall(procIsProcessInJob.Addr(), 3, uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL +func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr error) { + r0, _, _ := syscall.Syscall(procCM_Locate_DevNodeW.Addr(), 3, uintptr(unsafe.Pointer(pdnDevInst)), uintptr(unsafe.Pointer(pDeviceID)), uintptr(uFlags)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff } + hr = syscall.Errno(r0) } return } -func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err error) { + r0, _, _ := syscall.Syscall(procSetJobCompartmentId.Addr(), 2, uintptr(handle), uintptr(compartmentId), 0) + if r0 != 0 { + win32Err = syscall.Errno(r0) } return } -func OpenJobObject(desiredAccess uint32, inheritHandle bool, lpName *uint16) (handle windows.Handle, err error) { - var _p0 uint32 - if inheritHandle { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, e1 := syscall.Syscall(procOpenJobObjectW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(lpName))) - handle = windows.Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } +func ClosePseudoConsole(hpc windows.Handle) { + syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(hpc), 0, 0) return } -func SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procSetIoRateControlInformationJobObject.Addr(), 2, uintptr(jobHandle), uintptr(unsafe.Pointer(ioRateControlInfo)), 0) - ret = uint32(r0) - if ret == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } +func CopyFileW(existingFileName *uint16, newFileName *uint16, failIfExists int32) (err error) { + r1, _, e1 := syscall.Syscall(procCopyFileW.Addr(), 3, uintptr(unsafe.Pointer(existingFileName)), uintptr(unsafe.Pointer(newFileName)), uintptr(failIfExists)) + if r1 == 0 { + err = errnoErr(e1) } return } -func QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName *uint16, ioRateControlInfo **JOBOBJECT_IO_RATE_CONTROL_INFORMATION, infoBlockCount *uint32) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall6(procQueryIoRateControlInformationJobObject.Addr(), 4, uintptr(jobHandle), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(ioRateControlInfo)), 
uintptr(unsafe.Pointer(infoBlockCount)), 0, 0) - ret = uint32(r0) - if ret == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL +func createPseudoConsole(size uint32, hInput windows.Handle, hOutput windows.Handle, dwFlags uint32, hpcon *windows.Handle) (hr error) { + r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(hInput), uintptr(hOutput), uintptr(dwFlags), uintptr(unsafe.Pointer(hpcon)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff } + hr = syscall.Errno(r0) } return } -func NtOpenJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) { - r0, _, _ := syscall.Syscall(procNtOpenJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) - status = uint32(r0) +func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, stackSize uint32, startAddr uintptr, parameter uintptr, creationFlags uint32, threadID *uint32) (handle windows.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateRemoteThread.Addr(), 7, uintptr(process), uintptr(unsafe.Pointer(sa)), uintptr(stackSize), uintptr(startAddr), uintptr(parameter), uintptr(creationFlags), uintptr(unsafe.Pointer(threadID)), 0, 0) + handle = windows.Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } return } -func NtCreateJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) { - r0, _, _ := syscall.Syscall(procNtCreateJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) - status = uint32(r0) +func GetActiveProcessorCount(groupNumber uint16) (amount uint32) { + r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) + amount = uint32(r0) return } -func LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) { - r1, _, e1 := syscall.Syscall6(procLogonUserW.Addr(), 6, uintptr(unsafe.Pointer(username)), uintptr(unsafe.Pointer(domain)), uintptr(unsafe.Pointer(password)), uintptr(logonType), uintptr(logonProvider), uintptr(unsafe.Pointer(token))) +func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) { + r1, _, e1 := syscall.Syscall(procIsProcessInJob.Addr(), 3, uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result))) if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } + err = errnoErr(e1) } return } @@ -244,31 +230,34 @@ func LocalFree(ptr uintptr) { return } -func NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) { - r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(processHandle), uintptr(processInfoClass), uintptr(processInfo), uintptr(processInfoLength), uintptr(unsafe.Pointer(returnLength)), 0) - status = uint32(r0) +func OpenJobObject(desiredAccess uint32, inheritHandle int32, lpName *uint16) (handle windows.Handle, err error) { + r0, _, e1 := syscall.Syscall(procOpenJobObjectW.Addr(), 3, uintptr(desiredAccess), uintptr(inheritHandle), uintptr(unsafe.Pointer(lpName))) + handle = windows.Handle(r0) + if handle == 0 { + err = errnoErr(e1) + } return } -func GetActiveProcessorCount(groupNumber uint16) (amount uint32) { - r0, _, _ := 
syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) - amount = uint32(r0) +func QueryInformationJobObject(jobHandle windows.Handle, infoClass uint32, jobObjectInfo unsafe.Pointer, jobObjectInformationLength uint32, lpReturnLength *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(jobHandle), uintptr(infoClass), uintptr(jobObjectInfo), uintptr(jobObjectInformationLength), uintptr(unsafe.Pointer(lpReturnLength)), 0) + if r1 == 0 { + err = errnoErr(e1) + } return } -func CMGetDeviceIDListSize(pulLen *uint32, pszFilter *byte, uFlags uint32) (hr error) { - r0, _, _ := syscall.Syscall(procCM_Get_Device_ID_List_SizeA.Addr(), 3, uintptr(unsafe.Pointer(pulLen)), uintptr(unsafe.Pointer(pszFilter)), uintptr(uFlags)) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) +func QueryIoRateControlInformationJobObject(jobHandle windows.Handle, volumeName *uint16, ioRateControlInfo **JOBOBJECT_IO_RATE_CONTROL_INFORMATION, infoBlockCount *uint32) (ret uint32, err error) { + r0, _, e1 := syscall.Syscall6(procQueryIoRateControlInformationJobObject.Addr(), 4, uintptr(jobHandle), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(ioRateControlInfo)), uintptr(unsafe.Pointer(infoBlockCount)), 0, 0) + ret = uint32(r0) + if ret == 0 { + err = errnoErr(e1) } return } -func CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags uint32) (hr error) { - r0, _, _ := syscall.Syscall6(procCM_Get_Device_ID_ListA.Addr(), 4, uintptr(unsafe.Pointer(pszFilter)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(uFlags), 0, 0) +func resizePseudoConsole(hPc windows.Handle, size uint32) (hr error) { + r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(hPc), uintptr(size), 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff @@ -278,33 +267,52 @@ func CMGetDeviceIDList(pszFilter *byte, buffer *byte, bufferLen uint32, uFlags u return } -func CMLocateDevNode(pdnDevInst *uint32, pDeviceID string, uFlags uint32) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(pDeviceID) - if hr != nil { - return +func SearchPath(lpPath *uint16, lpFileName *uint16, lpExtension *uint16, nBufferLength uint32, lpBuffer *uint16, lpFilePath *uint16) (size uint32, err error) { + r0, _, e1 := syscall.Syscall6(procSearchPathW.Addr(), 6, uintptr(unsafe.Pointer(lpPath)), uintptr(unsafe.Pointer(lpFileName)), uintptr(unsafe.Pointer(lpExtension)), uintptr(nBufferLength), uintptr(unsafe.Pointer(lpBuffer)), uintptr(unsafe.Pointer(lpFilePath))) + size = uint32(r0) + if size == 0 { + err = errnoErr(e1) } - return _CMLocateDevNode(pdnDevInst, _p0, uFlags) + return } -func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr error) { - r0, _, _ := syscall.Syscall(procCM_Locate_DevNodeW.Addr(), 3, uintptr(unsafe.Pointer(pdnDevInst)), uintptr(unsafe.Pointer(pDeviceID)), uintptr(uFlags)) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) +func SetIoRateControlInformationJobObject(jobHandle windows.Handle, ioRateControlInfo *JOBOBJECT_IO_RATE_CONTROL_INFORMATION) (ret uint32, err error) { + r0, _, e1 := syscall.Syscall(procSetIoRateControlInformationJobObject.Addr(), 2, uintptr(jobHandle), uintptr(unsafe.Pointer(ioRateControlInfo)), 0) + ret = uint32(r0) + if ret == 0 { + err = errnoErr(e1) } return } -func CMGetDevNodeProperty(dnDevInst uint32, propertyKey *DevPropKey, propertyType 
*uint32, propertyBuffer *uint16, propertyBufferSize *uint32, uFlags uint32) (hr error) { - r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_PropertyW.Addr(), 6, uintptr(dnDevInst), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(unsafe.Pointer(propertyBufferSize)), uintptr(uFlags)) - if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - hr = syscall.Errno(r0) +func netLocalGroupAddMembers(serverName *uint16, groupName *uint16, level uint32, buf *byte, totalEntries uint32) (status error) { + r0, _, _ := syscall.Syscall6(procNetLocalGroupAddMembers.Addr(), 5, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(groupName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(totalEntries), 0) + if r0 != 0 { + status = syscall.Errno(r0) + } + return +} + +func netLocalGroupGetInfo(serverName *uint16, groupName *uint16, level uint32, bufptr **byte) (status error) { + r0, _, _ := syscall.Syscall6(procNetLocalGroupGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(groupName)), uintptr(level), uintptr(unsafe.Pointer(bufptr)), 0, 0) + if r0 != 0 { + status = syscall.Errno(r0) + } + return +} + +func netUserAdd(serverName *uint16, level uint32, buf *byte, parm_err *uint32) (status error) { + r0, _, _ := syscall.Syscall6(procNetUserAdd.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(parm_err)), 0, 0) + if r0 != 0 { + status = syscall.Errno(r0) + } + return +} + +func netUserDel(serverName *uint16, username *uint16) (status error) { + r0, _, _ := syscall.Syscall(procNetUserDel.Addr(), 2, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(username)), 0) + if r0 != 0 { + status = syscall.Errno(r0) } return } @@ -315,8 +323,8 @@ func NtCreateFile(handle *uintptr, accessMask uint32, oa *ObjectAttributes, iosb return } -func NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) { - r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class), 0) +func NtCreateJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) { + r0, _, _ := syscall.Syscall(procNtCreateJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) status = uint32(r0) return } @@ -327,24 +335,44 @@ func NtOpenDirectoryObject(handle *uintptr, accessMask uint32, oa *ObjectAttribu return } +func NtOpenJobObject(jobHandle *windows.Handle, desiredAccess uint32, objAttributes *ObjectAttributes) (status uint32) { + r0, _, _ := syscall.Syscall(procNtOpenJobObject.Addr(), 3, uintptr(unsafe.Pointer(jobHandle)), uintptr(desiredAccess), uintptr(unsafe.Pointer(objAttributes))) + status = uint32(r0) + return +} + func NtQueryDirectoryObject(handle uintptr, buffer *byte, length uint32, singleEntry bool, restartScan bool, context *uint32, returnLength *uint32) (status uint32) { var _p0 uint32 if singleEntry { _p0 = 1 - } else { - _p0 = 0 } var _p1 uint32 if restartScan { _p1 = 1 - } else { - _p1 = 0 } r0, _, _ := syscall.Syscall9(procNtQueryDirectoryObject.Addr(), 7, uintptr(handle), uintptr(unsafe.Pointer(buffer)), uintptr(length), uintptr(_p0), uintptr(_p1), uintptr(unsafe.Pointer(context)), uintptr(unsafe.Pointer(returnLength)), 0, 0) status = uint32(r0) 
return } +func NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo unsafe.Pointer, processInfoLength uint32, returnLength *uint32) (status uint32) { + r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(processHandle), uintptr(processInfoClass), uintptr(processInfo), uintptr(processInfoLength), uintptr(unsafe.Pointer(returnLength)), 0) + status = uint32(r0) + return +} + +func NtQuerySystemInformation(systemInfoClass int, systemInformation unsafe.Pointer, systemInfoLength uint32, returnLength *uint32) (status uint32) { + r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(systemInfoClass), uintptr(systemInformation), uintptr(systemInfoLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) + status = uint32(r0) + return +} + +func NtSetInformationFile(handle uintptr, iosb *IOStatusBlock, information uintptr, length uint32, class uint32) (status uint32) { + r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(information), uintptr(length), uintptr(class), 0) + status = uint32(r0) + return +} + func RtlNtStatusToDosError(status uint32) (winerr error) { r0, _, _ := syscall.Syscall(procRtlNtStatusToDosError.Addr(), 1, uintptr(status), 0, 0) if r0 != 0 { @@ -352,3 +380,36 @@ func RtlNtStatusToDosError(status uint32) (winerr error) { } return } + +func ORCloseHive(key syscall.Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procORCloseHive.Addr(), 1, uintptr(key), 0, 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func ORCreateHive(key *syscall.Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procORCreateHive.Addr(), 1, uintptr(unsafe.Pointer(key)), 0, 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func ORSaveHive(key syscall.Handle, file string, OsMajorVersion uint32, OsMinorVersion uint32) (regerrno error) { + var _p0 *uint16 + _p0, regerrno = syscall.UTF16PtrFromString(file) + if regerrno != nil { + return + } + return _ORSaveHive(key, _p0, OsMajorVersion, OsMinorVersion) +} + +func _ORSaveHive(key syscall.Handle, file *uint16, OsMajorVersion uint32, OsMinorVersion uint32) (regerrno error) { + r0, _, _ := syscall.Syscall6(procORSaveHive.Addr(), 4, uintptr(key), uintptr(unsafe.Pointer(file)), uintptr(OsMajorVersion), uintptr(OsMinorVersion), 0, 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/layer.go b/vendor/github.com/Microsoft/hcsshim/layer.go index 8916163706cfe..afd1ddd0aee67 100644 --- a/vendor/github.com/Microsoft/hcsshim/layer.go +++ b/vendor/github.com/Microsoft/hcsshim/layer.go @@ -1,3 +1,5 @@ +//go:build windows + package hcsshim import ( @@ -68,6 +70,9 @@ func ProcessUtilityVMImage(path string) error { func UnprepareLayer(info DriverInfo, layerId string) error { return wclayer.UnprepareLayer(context.Background(), layerPath(&info, layerId)) } +func ConvertToBaseLayer(path string) error { + return wclayer.ConvertToBaseLayer(context.Background(), path) +} type DriverInfo struct { Flavour int diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go b/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go index 3ab3bcd89a118..6c435d2b64954 100644 --- a/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go @@ -45,6 +45,15 @@ func Build() uint16 { return Get().Build } -func (osv OSVersion) 
ToString() string { +// String returns the OSVersion formatted as a string. It implements the +// [fmt.Stringer] interface. +func (osv OSVersion) String() string { return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build) } + +// ToString returns the OSVersion formatted as a string. +// +// Deprecated: use [OSVersion.String]. +func (osv OSVersion) ToString() string { + return osv.String() +} diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go index 75dce5d821d19..446369591a80a 100644 --- a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go +++ b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go @@ -1,37 +1,63 @@ package osversion +// Windows Client and Server build numbers. +// +// See: +// https://learn.microsoft.com/en-us/windows/release-health/release-information +// https://learn.microsoft.com/en-us/windows/release-health/windows-server-release-info +// https://learn.microsoft.com/en-us/windows/release-health/windows11-release-information const ( // RS1 (version 1607, codename "Redstone 1") corresponds to Windows Server // 2016 (ltsc2016) and Windows 10 (Anniversary Update). RS1 = 14393 + // V1607 (version 1607, codename "Redstone 1") is an alias for [RS1]. + V1607 = RS1 + // LTSC2016 (Windows Server 2016) is an alias for [RS1]. + LTSC2016 = RS1 // RS2 (version 1703, codename "Redstone 2") was a client-only update, and // corresponds to Windows 10 (Creators Update). RS2 = 15063 + // V1703 (version 1703, codename "Redstone 2") is an alias for [RS2]. + V1703 = RS2 // RS3 (version 1709, codename "Redstone 3") corresponds to Windows Server // 1709 (Semi-Annual Channel (SAC)), and Windows 10 (Fall Creators Update). RS3 = 16299 + // V1709 (version 1709, codename "Redstone 3") is an alias for [RS3]. + V1709 = RS3 // RS4 (version 1803, codename "Redstone 4") corresponds to Windows Server // 1803 (Semi-Annual Channel (SAC)), and Windows 10 (April 2018 Update). RS4 = 17134 + // V1803 (version 1803, codename "Redstone 4") is an alias for [RS4]. + V1803 = RS4 // RS5 (version 1809, codename "Redstone 5") corresponds to Windows Server // 2019 (ltsc2019), and Windows 10 (October 2018 Update). RS5 = 17763 + // V1809 (version 1809, codename "Redstone 5") is an alias for [RS5]. + V1809 = RS5 + // LTSC2019 (Windows Server 2019) is an alias for [RS5]. + LTSC2019 = RS5 - // V19H1 (version 1903) corresponds to Windows Server 1903 (semi-annual + // V19H1 (version 1903, codename 19H1) corresponds to Windows Server 1903 (semi-annual // channel). V19H1 = 18362 + // V1903 (version 1903) is an alias for [V19H1]. + V1903 = V19H1 - // V19H2 (version 1909) corresponds to Windows Server 1909 (semi-annual + // V19H2 (version 1909, codename 19H2) corresponds to Windows Server 1909 (semi-annual // channel). V19H2 = 18363 + // V1909 (version 1909) is an alias for [V19H2]. + V1909 = V19H2 - // V20H1 (version 2004) corresponds to Windows Server 2004 (semi-annual + // V20H1 (version 2004, codename 20H1) corresponds to Windows Server 2004 (semi-annual // channel). V20H1 = 19041 + // V2004 (version 2004) is an alias for [V20H1]. + V2004 = V20H1 // V20H2 corresponds to Windows Server 20H2 (semi-annual channel). V20H2 = 19042 @@ -44,7 +70,15 @@ const ( // V21H2Server corresponds to Windows Server 2022 (ltsc2022). V21H2Server = 20348 + // LTSC2022 (Windows Server 2022) is an alias for [V21H2Server] + LTSC2022 = V21H2Server // V21H2Win11 corresponds to Windows 11 (original release). 
V21H2Win11 = 22000 + + // V22H2Win10 corresponds to Windows 10 (2022 Update). + V22H2Win10 = 19045 + + // V22H2Win11 corresponds to Windows 11 (2022 Update). + V22H2Win11 = 22621 ) diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/doc.go b/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/doc.go new file mode 100644 index 0000000000000..0ec1aa05c40d2 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/doc.go @@ -0,0 +1,3 @@ +// Package ociwclayer provides functions for importing and exporting Windows +// container layers from and to their OCI tar representation. +package ociwclayer diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/export.go b/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/export.go index e3f1be333db36..1c2c82c70194e 100644 --- a/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/export.go +++ b/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/export.go @@ -1,5 +1,5 @@ -// Package ociwclayer provides functions for importing and exporting Windows -// container layers from and to their OCI tar representation. +//go:build windows + package ociwclayer import ( @@ -9,11 +9,9 @@ import ( "path/filepath" "github.com/Microsoft/go-winio/backuptar" - "github.com/Microsoft/hcsshim" + "github.com/Microsoft/hcsshim/internal/wclayer" ) -var driverInfo = hcsshim.DriverInfo{} - // ExportLayerToTar writes an OCI layer tar stream from the provided on-disk layer. // The caller must specify the parent layers, if any, ordered from lowest to // highest layer. @@ -21,25 +19,25 @@ var driverInfo = hcsshim.DriverInfo{} // The layer will be mounted for this process, so the caller should ensure that // it is not currently mounted. func ExportLayerToTar(ctx context.Context, w io.Writer, path string, parentLayerPaths []string) error { - err := hcsshim.ActivateLayer(driverInfo, path) + err := wclayer.ActivateLayer(ctx, path) if err != nil { return err } defer func() { - _ = hcsshim.DeactivateLayer(driverInfo, path) + _ = wclayer.DeactivateLayer(ctx, path) }() // Prepare and unprepare the layer to ensure that it has been initialized. - err = hcsshim.PrepareLayer(driverInfo, path, parentLayerPaths) + err = wclayer.PrepareLayer(ctx, path, parentLayerPaths) if err != nil { return err } - err = hcsshim.UnprepareLayer(driverInfo, path) + err = wclayer.UnprepareLayer(ctx, path) if err != nil { return err } - r, err := hcsshim.NewLayerReader(driverInfo, path, parentLayerPaths) + r, err := wclayer.NewLayerReader(ctx, path, parentLayerPaths) if err != nil { return err } @@ -52,7 +50,9 @@ func ExportLayerToTar(ctx context.Context, w io.Writer, path string, parentLayer return cerr } -func writeTarFromLayer(ctx context.Context, r hcsshim.LayerReader, w io.Writer) error { +func writeTarFromLayer(ctx context.Context, r wclayer.LayerReader, w io.Writer) error { + linkRecords := make(map[[16]byte]string) + t := tar.NewWriter(w) for { select { @@ -78,6 +78,27 @@ func writeTarFromLayer(ctx context.Context, r hcsshim.LayerReader, w io.Writer) return err } } else { + numberOfLinks, fileIDInfo, err := r.LinkInfo() + if err != nil { + return err + } + if numberOfLinks > 1 { + if linkName, ok := linkRecords[fileIDInfo.FileID]; ok { + // We've seen this file before, by another name, so put a hardlink in the tar stream. 
+ hdr := backuptar.BasicInfoHeader(name, 0, fileInfo) + hdr.Mode = 0644 + hdr.Typeflag = tar.TypeLink + hdr.Linkname = linkName + if err := t.WriteHeader(hdr); err != nil { + return err + } + continue + } + + // All subsequent names for this file will be hard-linked to this name + linkRecords[fileIDInfo.FileID] = filepath.ToSlash(name) + } + err = backuptar.WriteTarFileFromBackupStream(t, r, name, size, fileInfo) if err != nil { return err diff --git a/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/import.go b/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/import.go index e74a6b5946c47..c9fb6df276ebc 100644 --- a/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/import.go +++ b/vendor/github.com/Microsoft/hcsshim/pkg/ociwclayer/import.go @@ -1,3 +1,5 @@ +//go:build windows + package ociwclayer import ( @@ -12,7 +14,7 @@ import ( winio "github.com/Microsoft/go-winio" "github.com/Microsoft/go-winio/backuptar" - "github.com/Microsoft/hcsshim" + "github.com/Microsoft/hcsshim/internal/wclayer" ) const whiteoutPrefix = ".wh." @@ -41,7 +43,7 @@ func ImportLayerFromTar(ctx context.Context, r io.Reader, path string, parentLay if err != nil { return 0, err } - w, err := hcsshim.NewLayerWriter(hcsshim.DriverInfo{}, path, parentLayerPaths) + w, err := wclayer.NewLayerWriter(ctx, path, parentLayerPaths) if err != nil { return 0, err } @@ -56,7 +58,7 @@ func ImportLayerFromTar(ctx context.Context, r io.Reader, path string, parentLay return n, nil } -func writeLayerFromTar(ctx context.Context, r io.Reader, w hcsshim.LayerWriter, root string) (int64, error) { +func writeLayerFromTar(ctx context.Context, r io.Reader, w wclayer.LayerWriter, root string) (int64, error) { t := tar.NewReader(r) hdr, err := t.Next() totalSize := int64(0) diff --git a/vendor/github.com/Microsoft/hcsshim/process.go b/vendor/github.com/Microsoft/hcsshim/process.go index 3362c683357a9..44df91cde2913 100644 --- a/vendor/github.com/Microsoft/hcsshim/process.go +++ b/vendor/github.com/Microsoft/hcsshim/process.go @@ -1,3 +1,5 @@ +//go:build windows + package hcsshim import ( diff --git a/vendor/github.com/Microsoft/hcsshim/tools.go b/vendor/github.com/Microsoft/hcsshim/tools.go new file mode 100644 index 0000000000000..3964e2f02330a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/tools.go @@ -0,0 +1,5 @@ +//go:build tools + +package hcsshim + +import _ "github.com/Microsoft/go-winio/tools/mkwinsyscall" diff --git a/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go index 8bed848573831..9b619b6e6263c 100644 --- a/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/zsyscall_windows.go @@ -1,4 +1,6 @@ -// Code generated mksyscall_windows.exe DO NOT EDIT +//go:build windows + +// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. 
package hcsshim @@ -19,6 +21,7 @@ const ( var ( errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL ) // errnoErr returns common boxed Errno values, to prevent @@ -26,7 +29,7 @@ var ( func errnoErr(e syscall.Errno) error { switch e { case 0: - return nil + return errERROR_EINVAL case errnoERROR_IO_PENDING: return errERROR_IO_PENDING } diff --git a/vendor/github.com/cenkalti/backoff/v4/.travis.yml b/vendor/github.com/cenkalti/backoff/v4/.travis.yml deleted file mode 100644 index c79105c2fbebe..0000000000000 --- a/vendor/github.com/cenkalti/backoff/v4/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go: - - 1.13 - - 1.x - - tip -before_install: - - go get github.com/mattn/goveralls - - go get golang.org/x/tools/cmd/cover -script: - - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go index 3d3453215bb30..2c56c1e7189ba 100644 --- a/vendor/github.com/cenkalti/backoff/v4/exponential.go +++ b/vendor/github.com/cenkalti/backoff/v4/exponential.go @@ -147,6 +147,9 @@ func (b *ExponentialBackOff) incrementCurrentInterval() { // Returns a random value from the following interval: // [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval]. func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + if randomizationFactor == 0 { + return currentInterval // make sure no randomness is used when randomizationFactor is 0. + } var delta = randomizationFactor * float64(currentInterval) var minInterval = float64(currentInterval) - delta var maxInterval = float64(currentInterval) + delta diff --git a/vendor/github.com/cenkalti/backoff/v4/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go index 1ce2507ebc8be..b9c0c51cd755f 100644 --- a/vendor/github.com/cenkalti/backoff/v4/retry.go +++ b/vendor/github.com/cenkalti/backoff/v4/retry.go @@ -5,10 +5,20 @@ import ( "time" ) +// An OperationWithData is executing by RetryWithData() or RetryNotifyWithData(). +// The operation will be retried using a backoff policy if it returns an error. +type OperationWithData[T any] func() (T, error) + // An Operation is executing by Retry() or RetryNotify(). // The operation will be retried using a backoff policy if it returns an error. type Operation func() error +func (o Operation) withEmptyData() OperationWithData[struct{}] { + return func() (struct{}, error) { + return struct{}{}, o() + } +} + // Notify is a notify-on-error function. It receives an operation error and // backoff delay if the operation failed (with an error). // @@ -28,18 +38,41 @@ func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) } +// RetryWithData is like Retry but returns data in the response too. +func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) { + return RetryNotifyWithData(o, b, nil) +} + // RetryNotify calls notify function with the error and wait duration // for each failed attempt before sleep. func RetryNotify(operation Operation, b BackOff, notify Notify) error { return RetryNotifyWithTimer(operation, b, notify, nil) } +// RetryNotifyWithData is like RetryNotify but returns data in the response too. 
+func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) { + return doRetryNotify(operation, b, notify, nil) +} + // RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer // for each failed attempt before sleep. // A default timer that uses system timer is used when nil is passed. func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error { - var err error - var next time.Duration + _, err := doRetryNotify(operation.withEmptyData(), b, notify, t) + return err +} + +// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too. +func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { + return doRetryNotify(operation, b, notify, t) +} + +func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) { + var ( + err error + next time.Duration + res T + ) if t == nil { t = &defaultTimer{} } @@ -52,21 +85,22 @@ func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer b.Reset() for { - if err = operation(); err == nil { - return nil + res, err = operation() + if err == nil { + return res, nil } var permanent *PermanentError if errors.As(err, &permanent) { - return permanent.Err + return res, permanent.Err } if next = b.NextBackOff(); next == Stop { if cerr := ctx.Err(); cerr != nil { - return cerr + return res, cerr } - return err + return res, err } if notify != nil { @@ -77,7 +111,7 @@ func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer select { case <-ctx.Done(): - return ctx.Err() + return res, ctx.Err() case <-t.C(): } } diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md index 792b4a60b3465..8bf0e5b781532 100644 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ b/vendor/github.com/cespare/xxhash/v2/README.md @@ -3,8 +3,7 @@ [![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2) [![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml) -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a high-quality hashing algorithm that is much faster than anything in the Go standard library. @@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error) func (*Digest) Sum64() uint64 ``` -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. If desired, the `purego` build tag +opts into using the Go code even on those architectures. + +[xxHash]: http://cyan4973.github.io/xxHash/ ## Compatibility @@ -45,19 +47,20 @@ I recommend using the latest release of Go. Here are some quick benchmarks comparing the pure-Go and assembly implementations of Sum64. 
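An editorial aside, not part of the vendored diff: the generic retry helpers added to cenkalti/backoff/v4 above (OperationWithData, RetryWithData, RetryNotifyWithTimerAndData) let an operation hand back a value as well as an error. Below is a minimal usage sketch under that assumption; the failing-then-succeeding operation, its messages, and the attempt counter are made up purely for illustration, and the stock NewExponentialBackOff policy is used for brevity.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v4"
)

func main() {
	attempts := 0

	// RetryWithData retries the operation under the given backoff policy and,
	// unlike Retry, also returns the operation's result on success.
	// The operation below is a toy stand-in that fails twice before succeeding.
	greeting, err := backoff.RetryWithData[string](func() (string, error) {
		attempts++
		if attempts < 3 {
			return "", errors.New("not ready yet") // retried with backoff
		}
		return "hello after retries", nil
	}, backoff.NewExponentialBackOff())
	if err != nil {
		fmt.Println("giving up:", err)
		return
	}
	fmt.Printf("%s (took %d attempts)\n", greeting, attempts)
}
```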
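A second aside, also not part of the diff, for the cespare/xxhash/v2 changes in this hunk: the exported helpers listed in the README above (Sum64, New, Digest.Write, Digest.Sum64) are exercised as shown in the sketch below; the input bytes are arbitrary. On amd64 and arm64 the calls dispatch to the assembly implementations unless the purego build tag is set, as the updated README notes.

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	data := []byte("The quick brown fox jumps over the lazy dog")

	// One-shot hashing of a byte slice.
	fmt.Printf("%016x\n", xxhash.Sum64(data))

	// Streaming hashing via Digest, which implements hash.Hash64; feeding the
	// same bytes in chunks yields the same result as the one-shot call.
	d := xxhash.New()
	_, _ = d.Write(data[:20])
	_, _ = d.Write(data[20:])
	fmt.Printf("%016x\n", d.Sum64())
}
```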
-| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: ``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') ``` ## Projects using this package diff --git a/vendor/github.com/cespare/xxhash/v2/testall.sh b/vendor/github.com/cespare/xxhash/v2/testall.sh new file mode 100644 index 0000000000000..94b9c443987cf --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/testall.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -eu -o pipefail + +# Small convenience script for running the tests with various combinations of +# arch/tags. This assumes we're running on amd64 and have qemu available. + +go test ./... +go test -tags purego ./... +GOARCH=arm64 go test +GOARCH=arm64 go test -tags purego diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go index 15c835d5417c0..a9e0d45c9dcc7 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go @@ -16,19 +16,11 @@ const ( prime5 uint64 = 2870177450012600261 ) -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array of the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. type Digest struct { @@ -50,10 +42,10 @@ func New() *Digest { // Reset clears the Digest's state so that it can be reused. func (d *Digest) Reset() { - d.v1 = prime1v + prime2 + d.v1 = primes[0] + prime2 d.v2 = prime2 d.v3 = 0 - d.v4 = -prime1v + d.v4 = -primes[0] d.total = 0 d.n = 0 } @@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) { n = len(b) d.total += uint64(n) + memleft := d.mem[d.n&(len(d.mem)-1):] + if d.n+n < 32 { // This new data doesn't even fill the current block. - copy(d.mem[d.n:], b) + copy(memleft, b) d.n += n return } if d.n > 0 { // Finish off the partial block. 
- copy(d.mem[d.n:], b) + c := copy(memleft, b) d.v1 = round(d.v1, u64(d.mem[0:8])) d.v2 = round(d.v2, u64(d.mem[8:16])) d.v3 = round(d.v3, u64(d.mem[16:24])) d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] + b = b[c:] d.n = 0 } @@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 { h += d.total - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for i < end { - h ^= uint64(d.mem[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 - i++ } h ^= h >> 33 diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s index be8db5bf79601..3e8b132579ec2 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s @@ -1,215 +1,209 @@ +//go:build !appengine && gc && !purego // +build !appengine // +build gc // +build !purego #include "textflag.h" -// Register allocation: -// AX h -// SI pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// DI prime4v - -// round reads from and advances the buffer pointer in SI. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (SI), R12 \ - ADDQ $8, SI \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ DI, acc +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. +#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop // func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), DI + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 // Load slice. 
- MOVQ b_base+0(FP), SI - MOVQ b_len+8(FP), DX - LEAQ (SI)(DX*1), BX + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end // The first loop limit will be len(b)-32. - SUBQ $32, BX + SUBQ $32, end // Check whether we have at least one block. - CMPQ DX, $32 + CMPQ n, $32 JLT noBlocks // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until SI > BX. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) + MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) JMP afterBlocks noBlocks: - MOVQ ·prime5v(SB), AX + MOVQ ·primes+32(SB), h afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. - ADDQ $24, BX - - CMPQ SI, BX - JG fourByte - -wordLoop: - // Calculate k1. - MOVQ (SI), R8 - ADDQ $8, SI - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ DI, AX - - CMPQ SI, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ SI, BX - JG singles - - MOVL (SI), R8 - ADDQ $4, SI - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ SI, BX + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end JGE finalize -singlesLoop: - MOVBQZX (SI), R12 - ADDQ $1, SI - IMULQ ·prime5v(SB), R12 - XORQ R12, AX +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h - ROLQ $11, AX - IMULQ R13, AX - - CMPQ SI, BX - JL singlesLoop + CMPQ p, end + JL loop1 finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) RET -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. - // func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 // Load slice. - MOVQ b_base+8(FP), SI - MOVQ b_len+16(FP), DX - LEAQ (SI)(DX*1), BX - SUBQ $32, BX + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end // Load vN from d. 
- MOVQ d+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 // We don't need to check the loop condition here; this function is // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop + blockLoop() // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // The number of bytes written is SI minus the old base pointer. - SUBQ b_base+8(FP), SI - MOVQ SI, ret+32(FP) + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s new file mode 100644 index 0000000000000..7e3145a221864 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s @@ -0,0 +1,183 @@ +//go:build !appengine && gc && !purego +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Registers: +#define digest R1 +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 +#define prime1 R7 +#define prime2 R8 +#define prime3 R9 +#define prime4 R10 +#define prime5 R11 +#define v1 R12 +#define v2 R13 +#define v3 R14 +#define v4 R15 +#define x1 R20 +#define x2 R21 +#define x3 R22 +#define x4 R23 + +#define round(acc, x) \ + MADD prime2, acc, x, acc \ + ROR $64-31, acc \ + MUL prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + MUL prime2, x \ + ROR $64-31, x \ + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. +#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) + + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 + + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop + + ADD prime1, prime2, v1 + MOVD prime2, v2 + MOVD $0, v3 + NEG prime1, v4 + + blockLoop() + + ROR $64-1, v1, x1 + ROR $64-7, v2, x2 + ADD x1, x2 + ROR $64-12, v3, x3 + ROR $64-18, v4, x4 + ADD x3, x4 + ADD x2, x4, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) + +afterLoop: + ADD n, h + + TBZ $4, n, try8 + LDP.P 16(p), (x1, x2) + + round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. 
+ ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + + round0(x2) + ROR $64-27, h + EOR x2 @> 64-27, h, h + MADD h, prime4, prime1, h + +try8: + TBZ $3, n, try4 + MOVD.P 8(p), x1 + + round0(x1) + ROR $64-27, h + EOR x1 @> 64-27, h, h + MADD h, prime4, prime1, h + +try4: + TBZ $2, n, try2 + MOVWU.P 4(p), x2 + + MUL prime1, x2 + ROR $64-23, h + EOR x2 @> 64-23, h, h + MADD h, prime3, prime2, h + +try2: + TBZ $1, n, try1 + MOVHU.P 2(p), x3 + AND $255, x3, x1 + LSR $8, x3, x2 + + MUL prime5, x1 + ROR $64-11, h + EOR x1 @> 64-11, h, h + MUL prime1, h + + MUL prime5, x2 + ROR $64-11, h + EOR x2 @> 64-11, h, h + MUL prime1, h + +try1: + TBZ $0, n, finalize + MOVBU (p), x4 + + MUL prime5, x4 + ROR $64-11, h + EOR x4 @> 64-11, h, h + MUL prime1, h + +finalize: + EOR h >> 33, h + MUL prime2, h + EOR h >> 29, h + MUL prime3, h + EOR h >> 32, h + + MOVD h, ret+24(FP) + RET + +// func writeBlocks(d *Digest, b []byte) int +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) + + // Load state. Assume v[1-4] are stored contiguously. + MOVD d+0(FP), digest + LDP 0(digest), (v1, v2) + LDP 16(digest), (v3, v4) + + LDP b_base+8(FP), (p, n) + + blockLoop() + + // Store updated state. + STP (v1, v2), 0(digest) + STP (v3, v4), 16(digest) + + BIC $31, n + MOVD n, ret+32(FP) + RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go similarity index 73% rename from vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go rename to vendor/github.com/cespare/xxhash/v2/xxhash_asm.go index ad14b807f4d96..9216e0a40c1a4 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go @@ -1,3 +1,5 @@ +//go:build (amd64 || arm64) && !appengine && gc && !purego +// +build amd64 arm64 // +build !appengine // +build gc // +build !purego diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go index 4a5a821603e5b..26df13bba4b79 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go @@ -1,4 +1,5 @@ -// +build !amd64 appengine !gc purego +//go:build (!amd64 && !arm64) || appengine || !gc || purego +// +build !amd64,!arm64 appengine !gc purego package xxhash @@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 { var h uint64 if n >= 32 { - v1 := prime1v + prime2 + v1 := primes[0] + prime2 v2 := prime2 v3 := uint64(0) - v4 := -prime1v + v4 := -primes[0] for len(b) >= 32 { v1 = round(v1, u64(b[0:8:len(b)])) v2 = round(v2, u64(b[8:16:len(b)])) @@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 { h += uint64(n) - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go index fc9bea7a31f2b..e86f1b5fd8e4c 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go @@ -1,3 +1,4 @@ +//go:build appengine // +build appengine // This file contains the 
safe implementations of otherwise unsafe-using code. diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go index 376e0ca2e4974..1c1638fd88a1d 100644 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go @@ -1,3 +1,4 @@ +//go:build !appengine // +build !appengine // This file encapsulates usage of unsafe. @@ -11,7 +12,7 @@ import ( // In the future it's possible that compiler optimizations will make these // XxxString functions unnecessary by realizing that calls such as -// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205. +// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205. // If that happens, even if we keep these functions they can be replaced with // the trivial safe code. diff --git a/vendor/github.com/containerd/containerd/.cirrus.yml b/vendor/github.com/containerd/containerd/.cirrus.yml new file mode 100644 index 0000000000000..1252edffa4977 --- /dev/null +++ b/vendor/github.com/containerd/containerd/.cirrus.yml @@ -0,0 +1,80 @@ +# Cirrus CI gives open-source projects free 16.0 CPUs, +# we use 4 CPUs x 3 tasks = 12 CPUs. +# https://cirrus-ci.org/faq/#are-there-any-limits +# +# Undocumented constraints; +# - The maximum memory limit is 4G times the number of CPUs. +# - The number of CPUs should be multiple of 2. + +task: + name: Vagrant + + compute_engine_instance: + image_project: cirrus-images + image: family/docker-kvm + platform: linux + nested_virtualization: true + cpu: 4 + memory: 16G + + env: + GOTEST: gotestsum -- + # By default, Cirrus CI doesn't have HOME defined + HOME: /root + matrix: + BOX: fedora/37-cloud-base + # v7.0.0 does not boot. v6.0.0 was not released. 
+ BOX: rockylinux/8@5.0.0 + install_libvirt_vagrant_script: | + apt-get update + apt-get install -y libvirt-daemon libvirt-daemon-system vagrant vagrant-libvirt + systemctl enable --now libvirtd + + vagrant_cache: + folder: /root/.vagrant.d + fingerprint_script: uname --kernel-release --kernel-version && cat Vagrantfile + + vagrant_up_script: | + vagrant up --no-tty + + integration_script: | + vagrant up --provision-with=selinux,install-runc,install-gotestsum,test-integration + + cri_integration_script: | + vagrant up --provision-with=selinux,install-runc,install-gotestsum,test-cri-integration + + cri_test_script: | + vagrant up --provision-with=selinux,install-runc,install-gotestsum,test-cri + +task: + name: CGroupsV2 - rootless CRI test + + env: + HOME: /root + + compute_engine_instance: + image_project: cirrus-images + image: family/docker-kvm + platform: linux + nested_virtualization: true + cpu: 4 + memory: 16G + + install_libvirt_vagrant_script: | + apt-get update + apt-get install -y libvirt-daemon libvirt-daemon-system vagrant vagrant-libvirt + systemctl enable --now libvirtd + + vagrant_cache: + folder: /root/.vagrant.d + fingerprint_script: uname -a; cat Vagrantfile + + vagrant_up_script: | + vagrant up --provision-with=install-rootless-podman --no-tty + + podman_build_script: | + # Execute rootless podman to create the UserNS env + vagrant ssh -- podman build --target cri-in-userns -t cri-in-userns -f /vagrant/contrib/Dockerfile.test /vagrant + + test_script: | + vagrant ssh -- podman run --rm --privileged cri-in-userns diff --git a/vendor/github.com/containerd/containerd/.golangci.yml b/vendor/github.com/containerd/containerd/.golangci.yml index 4bf84599d717e..e835ba17681d6 100644 --- a/vendor/github.com/containerd/containerd/.golangci.yml +++ b/vendor/github.com/containerd/containerd/.golangci.yml @@ -1,27 +1,56 @@ linters: enable: - - structcheck - - varcheck - - staticcheck - - unconvert + - exportloopref # Checks for pointers to enclosing loop variables - gofmt - goimports - - revive + - gosec - ineffassign - - vet - - unused - misspell + - nolintlint + - revive + - staticcheck + - tenv # Detects using os.Setenv instead of t.Setenv since Go 1.17 + - unconvert + - unused + - vet + - dupword # Checks for duplicate words in the source code disable: - errcheck issues: include: - EXC0002 + max-issues-per-linter: 0 + max-same-issues: 0 + + # Only using / doesn't work due to https://github.com/golangci/golangci-lint/issues/1398. + exclude-rules: + - path: 'cmd[\\/]containerd[\\/]builtins[\\/]' + text: "blank-imports:" + - path: 'contrib[\\/]fuzz[\\/]' + text: "exported: func name will be used as fuzz.Fuzz" + +linters-settings: + gosec: + # The following issues surfaced when `gosec` linter + # was enabled. They are temporarily excluded to unblock + # the existing workflow, but still to be addressed by + # future works. 
+ excludes: + - G204 + - G305 + - G306 + - G402 + - G404 run: timeout: 8m skip-dirs: - api + - cluster - design - docs - docs/man + - releases + - reports + - test # e2e scripts diff --git a/vendor/github.com/containerd/containerd/.mailmap b/vendor/github.com/containerd/containerd/.mailmap index 11dcdc48c0883..87221c9af07ed 100644 --- a/vendor/github.com/containerd/containerd/.mailmap +++ b/vendor/github.com/containerd/containerd/.mailmap @@ -1,10 +1,13 @@ Abhinandan Prativadi Abhinandan Prativadi Ace-Tang +Adam Korcz +Aditi Sharma Akihiro Suda Akihiro Suda Allen Sun Alexander Morozov +Antonio Ojea Antonio Ojea Amit Krishnan Andrei Vagin @@ -26,9 +29,12 @@ Daniel Dao Derek McGowan Edgar Lee Eric Ernst +Eric Lin Eric Ren Eric Ren Eric Ren +Fabian Hoffmann +Fabian Hoffmann <35104465+FabHof@users.noreply.github.com> Fabiano Fidêncio Fahed Dorgaa Frank Yang @@ -37,6 +43,7 @@ Fupan Li Fupan Li Furkan Türkal Georgia Panoutsakopoulou +guodong Guangming Wang Haiyan Meng haoyun @@ -51,12 +58,16 @@ Jian Liao Jian Liao Ji'an Liu Jie Zhang +Jiongchi Yu John Howard John Howard John Howard John Howard +Junyu Liu +LongtaoZhang Lorenz Brun Luc Perkins +James Sturtevant Jiajun Jiang Julien Balestra Jun Lin Chen <1913688+mc256@users.noreply.github.com> @@ -64,6 +75,7 @@ Justin Cormack Justin Terry Justin Terry Kante +Kazuyoshi Kato Kenfe-Mickaël Laventure Kevin Kern Kevin Parsons @@ -73,6 +85,7 @@ Kohei Tokunaga Krasi Georgiev Lantao Liu Lantao Liu +lengrongfu <1275177125@qq.com> Li Yuxuan Lifubang Lu Jingxiao @@ -84,6 +97,7 @@ Mario Hros Mario Macias Mark Gordon Marvin Giessing +Mathis Michel Michael Crosby Michael Katsoulis Mike Brown @@ -96,6 +110,7 @@ Nishchay Kumar Oliver Stenbom Phil Estes Phil Estes +Qian Zhang Reid Li Robin Winkelewski Ross Boucher @@ -121,20 +136,24 @@ Su Xiaolin Takumasa Sakao Ted Yu Tõnis Tiigi +Tony Fang +Tony Fang Wade Lee Wade Lee Wade Lee <21621232@zju.edu.cn> -Wang Bing wanglei wanglei wangzhan Wei Fu Wei Fu +wen chen Xiaodong Zhang Xuean Yan Yang Yang Yue Zhang Yuxing Liu +Zechun Chen +zhang he Zhang Wei zhangyadong Zhenguang Zhu diff --git a/vendor/github.com/containerd/containerd/ADOPTERS.md b/vendor/github.com/containerd/containerd/ADOPTERS.md index bbf99e7dd592d..c5e60df7b41ad 100644 --- a/vendor/github.com/containerd/containerd/ADOPTERS.md +++ b/vendor/github.com/containerd/containerd/ADOPTERS.md @@ -12,6 +12,8 @@ including the Balena project listed below. **_[IBM Cloud Private (ICP)](https://www.ibm.com/cloud/private)_** - IBM's on-premises cloud offering has containerd as a "tech preview" CRI runtime for the Kubernetes offered within this product for the past two releases, and plans to fully migrate to containerd in a future release. +**_[Google Container-Optimized OS (COS)](https://cloud.google.com/container-optimized-os/docs)_** - Container-Optimized OS is a Linux Operating System from Google that is optimized for running containers. COS has used containerd as container runtime when containerd was part of Docker's core container runtime. + **_[Google Cloud Kubernetes Engine (GKE)](https://cloud.google.com/kubernetes-engine/)_** - containerd has been offered in GKE since version 1.14 and has been the default runtime since version 1.19. It is also the only supported runtime for GKE Autopilot from the launch. 
[More details](https://cloud.google.com/kubernetes-engine/docs/concepts/using-containerd) **_[AWS Fargate](https://aws.amazon.com/fargate)_** - uses containerd + Firecracker (noted below) as the runtime and isolation technology for containers run in the Fargate platform. Fargate is a serverless, container-native compute offering from Amazon Web Services. @@ -36,7 +38,7 @@ including the Balena project listed below. **_BuildKit_** - The Moby project's [BuildKit](https://github.com/moby/buildkit) can use either runC or containerd as build execution backends for building container images. BuildKit support has also been built into the Docker engine in recent releases, making BuildKit provide the backend to the `docker build` command. -**_[Azure Kubernetes Service (AKS)](https://azure.microsoft.com/services/kubernetes-service)_** - Microsoft's managed Kubernetes offering uses containerd for Linux nodes running v1.19 or greater. Containerd for Windows nodes is currently in public preview. [More Details](https://docs.microsoft.com/azure/aks/cluster-configuration#container-runtime-configuration) +**_[Azure Kubernetes Service (AKS)](https://azure.microsoft.com/services/kubernetes-service)_** - Microsoft's managed Kubernetes offering uses containerd for Linux nodes running v1.19 and greater, and Windows nodes running 1.20 and greater. [More Details](https://docs.microsoft.com/azure/aks/cluster-configuration#container-runtime-configuration) **_Amazon Firecracker_** - The AWS [Firecracker VMM project](http://firecracker-microvm.io/) has extended containerd with a new snapshotter and v2 shim to allow containerd to drive virtualized container processes via their VMM implementation. More details on their containerd integration are available in [their GitHub project](https://github.com/firecracker-microvm/firecracker-containerd). @@ -52,6 +54,8 @@ including the Balena project listed below. **_[Talos Linux](https://www.talos.dev/)_** - Talos Linux is Linux designed for Kubernetes – secure, immutable, and minimal. Talos Linux is using containerd as the core system runtime and CRI implementation. +**_Deckhouse_** - [Deckhouse Kubernetes Platform](https://deckhouse.io/) from Flant allows you to manage Kubernetes clusters anywhere in a fully automatic and uniform fashion. It uses containerd as the default CRI runtime. + **_Other Projects_** - While the above list provides a cross-section of well known uses of containerd, the simplicity and clear API layer for containerd has inspired many smaller projects around providing simple container management platforms. 
Several examples of building higher layer functionality on top of the containerd base have come from various containerd community participants: - Michael Crosby's [boss](https://github.com/crosbymichael/boss) project, - Evan Hazlett's [stellar](https://github.com/ehazlett/stellar) project, diff --git a/vendor/github.com/containerd/containerd/BUILDING.md b/vendor/github.com/containerd/containerd/BUILDING.md index 4f2196e6c240b..ac9a2716fae0b 100644 --- a/vendor/github.com/containerd/containerd/BUILDING.md +++ b/vendor/github.com/containerd/containerd/BUILDING.md @@ -14,10 +14,12 @@ This doc includes: To build the `containerd` daemon, and the `ctr` simple test client, the following build system dependencies are required: -* Go 1.13.x or above except 1.14.x +* Go 1.19.x or above * Protoc 3.x compiler and headers (download at the [Google protobuf releases page](https://github.com/protocolbuffers/protobuf/releases)) * Btrfs headers and libraries for your distribution. Note that building the btrfs driver can be disabled via the build tag `no_btrfs`, removing this dependency. +> *Note*: On macOS, you need a third party runtime to run containers on containerd + ## Build the development environment First you need to setup your Go development environment. You can follow this @@ -37,12 +39,16 @@ wget -c https://github.com/protocolbuffers/protobuf/releases/download/v3.11.4/pr sudo unzip protoc-3.11.4-linux-x86_64.zip -d /usr/local ``` -`containerd` uses [Btrfs](https://en.wikipedia.org/wiki/Btrfs) it means that you -need to satisfy these dependencies in your system: +To enable optional [Btrfs](https://en.wikipedia.org/wiki/Btrfs) snapshotter, you should have the headers from the Linux kernel 4.12 or later. +The dependency on the kernel headers only affects users building containerd from source. +Users on older kernels may opt to not compile the btrfs support (see `BUILDTAGS=no_btrfs` below), +or to provide headers from a newer kernel. -* CentOS/Fedora: `yum install btrfs-progs-devel` -* Debian/Ubuntu: `apt-get install btrfs-progs libbtrfs-dev` - * Debian(before Buster)/Ubuntu(before 19.10): `apt-get install btrfs-tools` +> **Note** +> The dependency on the Linux kernel headers 4.12 was introduced in containerd 1.7.0-beta.4. +> +> containerd 1.6 has different set of dependencies for enabling btrfs. +> containerd 1.6 users should refer to https://github.com/containerd/containerd/blob/release/1.6/BUILDING.md#build-the-development-environment At this point you are ready to build `containerd` yourself! @@ -54,6 +60,8 @@ the system, sometimes it is necessary to build runc directly when working with container runtime development. Make sure to follow the guidelines for versioning in [RUNC.md](/docs/RUNC.md) for the best results. +> *Note*: Runc only supports Linux + ## Build containerd `containerd` uses `make` to create a repeatable build flow. It means that you @@ -117,6 +125,8 @@ Changes to these files should become a single commit for a PR which relies on ve Please refer to [RUNC.md](/docs/RUNC.md) for the currently supported version of `runc` that is used by containerd. +> *Note*: On macOS, the containerd daemon can be built and run natively. However, as stated above, runc only supports linux. + ### Static binaries You can build static binaries by providing a few variables to `make`: @@ -141,9 +151,6 @@ You can build an image from this `Dockerfile`: ```dockerfile FROM golang - -RUN apt-get update && \ - apt-get install -y libbtrfs-dev ``` Let's suppose that you built an image called `containerd/build`. 
From the @@ -180,7 +187,7 @@ We can build an image from this `Dockerfile`: FROM golang RUN apt-get update && \ - apt-get install -y libbtrfs-dev libseccomp-dev + apt-get install -y libseccomp-dev ``` In our Docker container we will build `runc` build, which includes @@ -236,6 +243,7 @@ During the automated CI the unit tests and integration tests are run as part of - `make test`: run all non-integration tests that do not require `root` privileges - `make root-test`: run all non-integration tests which require `root` - `make integration`: run all tests, including integration tests and those which require `root`. `TESTFLAGS_PARALLEL` can be used to control parallelism. For example, `TESTFLAGS_PARALLEL=1 make integration` will lead a non-parallel execution. The default value of `TESTFLAGS_PARALLEL` is **8**. + - `make cri-integration`: [CRI Integration Tests](https://github.com/containerd/containerd/blob/main/docs/cri/testing.md#cri-integration-test) run cri integration tests To execute a specific test or set of tests you can use the `go test` capabilities without using the `Makefile` targets. The following examples show how to specify a test @@ -271,7 +279,7 @@ In addition to `go test`-based testing executed via the `Makefile` targets, the With this tool you can stress a running containerd daemon for a specified period of time, selecting a concurrency level to generate stress against the daemon. The following command is an example of having five workers running for two hours against a default containerd gRPC socket address: ```sh -containerd-stress -c 5 -t 120 +containerd-stress -c 5 -d 120m ``` For more information on this tool's options please run `containerd-stress --help`. diff --git a/vendor/github.com/containerd/containerd/Makefile b/vendor/github.com/containerd/containerd/Makefile index 7441eeac66e12..02a8aa2027a9b 100644 --- a/vendor/github.com/containerd/containerd/Makefile +++ b/vendor/github.com/containerd/containerd/Makefile @@ -75,6 +75,7 @@ WHALE = "🇩" ONI = "👹" RELEASE=containerd-$(VERSION:v%=%)-${GOOS}-${GOARCH} +STATICRELEASE=containerd-static-$(VERSION:v%=%)-${GOOS}-${GOARCH} CRIRELEASE=cri-containerd-$(VERSION:v%=%)-${GOOS}-${GOARCH} CRICNIRELEASE=cri-containerd-cni-$(VERSION:v%=%)-${GOOS}-${GOARCH} @@ -88,6 +89,7 @@ ifdef BUILDTAGS GO_BUILDTAGS = ${BUILDTAGS} endif GO_BUILDTAGS ?= +GO_BUILDTAGS += urfave_cli_no_docs GO_BUILDTAGS += ${DEBUG_TAGS} ifneq ($(STATIC),) GO_BUILDTAGS += osusergo netgo static_build @@ -122,7 +124,7 @@ ifdef SKIPTESTS endif #Replaces ":" (*nix), ";" (windows) with newline for easy parsing -GOPATHS=$(shell echo ${GOPATH} | tr ":" "\n" | tr ";" "\n") +GOPATHS=$(shell go env GOPATH | tr ":" "\n" | tr ";" "\n") TESTFLAGS_RACE= GO_BUILD_FLAGS= @@ -147,7 +149,7 @@ GOTEST ?= $(GO) test OUTPUTDIR = $(join $(ROOTDIR), _output) CRIDIR=$(OUTPUTDIR)/cri -.PHONY: clean all AUTHORS build binaries test integration generate protos checkprotos coverage ci check help install uninstall vendor release mandir install-man genman install-cri-deps cri-release cri-cni-release cri-integration install-deps bin/cri-integration.test +.PHONY: clean all AUTHORS build binaries test integration generate protos check-protos coverage ci check help install uninstall vendor release static-release mandir install-man genman install-cri-deps cri-release cri-cni-release cri-integration install-deps bin/cri-integration.test .DEFAULT: default # Forcibly set the default goal to all, in case an include above brought in a rule definition. 
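As context for the `BUILDTAGS`/`GO_BUILDTAGS` handling in the BUILDING.md and Makefile hunks above (`no_btrfs`, `urfave_cli_no_docs`, `static_build`): these values end up as ordinary Go build constraints. A minimal sketch of the mechanism, using an illustrative file and package name rather than the actual containerd sources:

```go
//go:build !no_btrfs

// Package snapshotters is illustrative only; the real btrfs snapshotter in
// containerd lives elsewhere and pulls in the btrfs kernel headers.
package snapshotters

import "fmt"

// NewBtrfs stands in for a constructor that would otherwise require the
// btrfs development headers. When the package is built with
// `go build -tags no_btrfs` (which is what `make BUILDTAGS=no_btrfs`
// feeds into GO_BUILDTAGS), this file is excluded from the build and
// the optional dependency is never compiled.
func NewBtrfs(root string) (string, error) {
	return fmt.Sprintf("btrfs snapshotter rooted at %s", root), nil
}
```

The same mechanism is why the `bin/gen-manpages` rule added below has to strip `urfave_cli_no_docs` back out of `GO_TAGS`: that tag removes the CLI documentation generator from the regular binaries, but the man-page generator still needs it.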
@@ -159,7 +161,7 @@ check: proto-fmt ## run all linters @echo "$(WHALE) $@" GOGC=75 golangci-lint run -ci: check binaries checkprotos coverage coverage-integration ## to be used by the CI +ci: check binaries check-protos coverage coverage-integration ## to be used by the CI AUTHORS: .mailmap .git/HEAD git log --format='%aN <%aE>' | sort -fu > $@ @@ -168,7 +170,7 @@ generate: protos @echo "$(WHALE) $@" @PATH="${ROOTDIR}/bin:${PATH}" $(GO) generate -x ${PACKAGES} -protos: bin/protoc-gen-gogoctrd ## generate protobuf +protos: bin/protoc-gen-go-fieldpath @echo "$(WHALE) $@" @find . -path ./vendor -prune -false -o -name '*.pb.go' | xargs rm $(eval TMPDIR := $(shell mktemp -d)) @@ -177,6 +179,7 @@ protos: bin/protoc-gen-gogoctrd ## generate protobuf @(PATH="${ROOTDIR}/bin:${PATH}" protobuild --quiet ${NON_API_PACKAGES}) @mv ${TMPDIR}/vendor ${ROOTDIR} @rm -rf ${TMPDIR} + go-fix-acronym -w -a '(Id|Io|Uuid|Os)$$' $(shell find api/ runtime/ -name '*.pb.go') check-protos: protos ## check if protobufs needs to be generated again @echo "$(WHALE) $@" @@ -194,8 +197,6 @@ proto-fmt: ## check format of proto files @echo "$(WHALE) $@" @test -z "$$(find . -path ./vendor -prune -o -path ./protobuf/google/rpc -prune -o -name '*.proto' -type f -exec grep -Hn -e "^ " {} \; | tee /dev/stderr)" || \ (echo "$(ONI) please indent proto files with tabs only" && false) - @test -z "$$(find . -path ./vendor -prune -o -name '*.proto' -type f -exec grep -Hn "Meta meta = " {} \; | grep -v '(gogoproto.nullable) = false' | tee /dev/stderr)" || \ - (echo "$(ONI) meta fields in proto files must have option (gogoproto.nullable) = false" && false) build: ## build the go packages @echo "$(WHALE) $@" @@ -218,9 +219,9 @@ bin/cri-integration.test: @echo "$(WHALE) $@" @$(GO) test -c ./integration -o bin/cri-integration.test -cri-integration: binaries bin/cri-integration.test ## run cri integration tests +cri-integration: binaries bin/cri-integration.test ## run cri integration tests (example: FOCUS=TestContainerListStats make cri-integration) @echo "$(WHALE) $@" - @bash -x ./script/test/cri-integration.sh + @bash ./script/test/cri-integration.sh @rm -rf bin/cri-integration.test # build runc shimv2 with failpoint control, only used by integration test @@ -241,13 +242,18 @@ FORCE: define BUILD_BINARY @echo "$(WHALE) $@" -@$(GO) build ${DEBUG_GO_GCFLAGS} ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@ ${GO_LDFLAGS} ${GO_TAGS} ./$< +$(GO) build ${DEBUG_GO_GCFLAGS} ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@ ${GO_LDFLAGS} ${GO_TAGS} ./$< endef # Build a binary from a cmd. 
bin/%: cmd/% FORCE $(call BUILD_BINARY) +# gen-manpages must not have the urfave_cli_no_docs build-tag set +bin/gen-manpages: cmd/gen-manpages FORCE + @echo "$(WHALE) $@" + $(GO) build ${DEBUG_GO_GCFLAGS} ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@ ${GO_LDFLAGS} $(subst urfave_cli_no_docs,,${GO_TAGS}) ./cmd/gen-manpages + bin/containerd-shim: cmd/containerd-shim FORCE # set !cgo and omit pie for a static shim build: https://github.com/golang/go/issues/17789#issuecomment-258542220 @echo "$(WHALE) $@" @CGO_ENABLED=${SHIM_CGO_ENABLED} $(GO) build ${GO_BUILD_FLAGS} -o $@ ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim @@ -272,13 +278,13 @@ mandir: # Kept for backwards compatibility genman: man/containerd.8 man/ctr.8 -man/containerd.8: FORCE +man/containerd.8: bin/gen-manpages FORCE @echo "$(WHALE) $@" - $(GO) run -mod=readonly ${GO_TAGS} cmd/gen-manpages/main.go $(@F) $(@D) + $< $(@F) $(@D) -man/ctr.8: FORCE +man/ctr.8: bin/gen-manpages FORCE @echo "$(WHALE) $@" - $(GO) run -mod=readonly ${GO_TAGS} cmd/gen-manpages/main.go $(@F) $(@D) + $< $(@F) $(@D) man/%: docs/man/%.md FORCE @echo "$(WHALE) $@" @@ -294,18 +300,40 @@ install-man: man $(foreach manpage,$(addprefix man/,$(MANPAGES)), $(call installmanpage,$(manpage),$(subst .,,$(suffix $(manpage))),$(notdir $(manpage)))) +define pack_release + @rm -rf releases/$(1) releases/$(1).tar.gz + @$(INSTALL) -d releases/$(1)/bin + @$(INSTALL) $(BINARIES) releases/$(1)/bin + @tar -czf releases/$(1).tar.gz -C releases/$(1) bin + @rm -rf releases/$(1) +endef + + releases/$(RELEASE).tar.gz: $(BINARIES) @echo "$(WHALE) $@" - @rm -rf releases/$(RELEASE) releases/$(RELEASE).tar.gz - @$(INSTALL) -d releases/$(RELEASE)/bin - @$(INSTALL) $(BINARIES) releases/$(RELEASE)/bin - @tar -czf releases/$(RELEASE).tar.gz -C releases/$(RELEASE) bin - @rm -rf releases/$(RELEASE) + $(call pack_release,$(RELEASE)) release: releases/$(RELEASE).tar.gz @echo "$(WHALE) $@" @cd releases && sha256sum $(RELEASE).tar.gz >$(RELEASE).tar.gz.sha256sum +releases/$(STATICRELEASE).tar.gz: +ifeq ($(GOOS),linux) + @make STATIC=1 $(BINARIES) + @echo "$(WHALE) $@" + $(call pack_release,$(STATICRELEASE)) +else + @echo "Skipping $(STATICRELEASE) for $(GOOS)" +endif + +static-release: releases/$(STATICRELEASE).tar.gz +ifeq ($(GOOS),linux) + @echo "$(WHALE) $@" + @cd releases && sha256sum $(STATICRELEASE).tar.gz >$(STATICRELEASE).tar.gz.sha256sum +else + @echo "Skipping releasing $(STATICRELEASE) for $(GOOS)" +endif + # install of cri deps into release output directory ifeq ($(GOOS),windows) install-cri-deps: $(BINARIES) @@ -332,22 +360,26 @@ install-cri-deps: $(BINARIES) @$(INSTALL) $(BINARIES) $(CRIDIR)/bin endif +$(CRIDIR)/cri-containerd.DEPRECATED.txt: + @mkdir -p $(CRIDIR) + @$(INSTALL) -m 644 releases/cri-containerd.DEPRECATED.txt $@ + ifeq ($(GOOS),windows) -releases/$(CRIRELEASE).tar.gz: install-cri-deps +releases/$(CRIRELEASE).tar.gz: install-cri-deps $(CRIDIR)/cri-containerd.DEPRECATED.txt @echo "$(WHALE) $@" @cd $(CRIDIR) && tar -czf ../../releases/$(CRIRELEASE).tar.gz * -releases/$(CRICNIRELEASE).tar.gz: install-cri-deps +releases/$(CRICNIRELEASE).tar.gz: install-cri-deps $(CRIDIR)/cri-containerd.DEPRECATED.txt @echo "$(WHALE) $@" @cd $(CRIDIR) && tar -czf ../../releases/$(CRICNIRELEASE).tar.gz * else -releases/$(CRIRELEASE).tar.gz: install-cri-deps +releases/$(CRIRELEASE).tar.gz: install-cri-deps $(CRIDIR)/cri-containerd.DEPRECATED.txt @echo "$(WHALE) $@" - @tar -czf releases/$(CRIRELEASE).tar.gz -C $(CRIDIR) etc/crictl.yaml etc/systemd usr opt/containerd + @tar -czf 
releases/$(CRIRELEASE).tar.gz -C $(CRIDIR) cri-containerd.DEPRECATED.txt etc/crictl.yaml etc/systemd usr opt/containerd -releases/$(CRICNIRELEASE).tar.gz: install-cri-deps +releases/$(CRICNIRELEASE).tar.gz: install-cri-deps $(CRIDIR)/cri-containerd.DEPRECATED.txt @echo "$(WHALE) $@" - @tar -czf releases/$(CRICNIRELEASE).tar.gz -C $(CRIDIR) etc usr opt + @tar -czf releases/$(CRICNIRELEASE).tar.gz -C $(CRIDIR) cri-containerd.DEPRECATED.txt etc usr opt endif cri-release: releases/$(CRIRELEASE).tar.gz diff --git a/vendor/github.com/containerd/containerd/Protobuild.toml b/vendor/github.com/containerd/containerd/Protobuild.toml index ccc4e79cf0b58..09a4c97047044 100644 --- a/vendor/github.com/containerd/containerd/Protobuild.toml +++ b/vendor/github.com/containerd/containerd/Protobuild.toml @@ -1,6 +1,5 @@ -version = "unstable" -generator = "gogoctrd" -plugins = ["grpc", "fieldpath"] +version = "2" +generators = ["go"] # Control protoc include paths. Below are usually some good defaults, but feel # free to try it without them if it works for your project. @@ -9,32 +8,25 @@ plugins = ["grpc", "fieldpath"] # treat the root of the project as an include, but this may not be necessary. before = ["./protobuf"] - # Paths that should be treated as include roots in relation to the vendor - # directory. These will be calculated with the vendor directory nearest the - # target package. - packages = ["github.com/gogo/protobuf", "github.com/gogo/googleapis"] - # Paths that will be added untouched to the end of the includes. We use # `/usr/local/include` to pickup the common install location of protobuf. # This is the default. after = ["/usr/local/include", "/usr/include"] -# This section maps protobuf imports to Go packages. These will become -# `-M` directives in the call to the go protobuf generator. 
-[packages] - "gogoproto/gogo.proto" = "github.com/gogo/protobuf/gogoproto" - "google/protobuf/any.proto" = "github.com/gogo/protobuf/types" - "google/protobuf/empty.proto" = "github.com/gogo/protobuf/types" - "google/protobuf/descriptor.proto" = "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" - "google/protobuf/field_mask.proto" = "github.com/gogo/protobuf/types" - "google/protobuf/timestamp.proto" = "github.com/gogo/protobuf/types" - "google/protobuf/duration.proto" = "github.com/gogo/protobuf/types" - "google/rpc/status.proto" = "github.com/gogo/googleapis/google/rpc" - [[overrides]] # enable ttrpc and disable fieldpath and grpc for the shim -prefixes = ["github.com/containerd/containerd/runtime/v1/shim/v1", "github.com/containerd/containerd/runtime/v2/task"] -plugins = ["ttrpc"] +prefixes = [ + "github.com/containerd/containerd/runtime/v1/shim/v1", + "github.com/containerd/containerd/api/runtime/task/v2", + "github.com/containerd/containerd/api/runtime/sandbox/v1", +] +generators = ["go", "go-ttrpc"] + +[[overrides]] +prefixes = [ + "github.com/containerd/containerd/third_party/k8s.io/cri-api/pkg/apis/runtime/v1alpha2", +] +generators = ["go", "go-grpc"] # Lock down runc config [[descriptors]] @@ -42,7 +34,6 @@ prefix = "github.com/containerd/containerd/runtime/linux/runctypes" target = "runtime/linux/runctypes/next.pb.txt" ignore_files = [ "google/protobuf/descriptor.proto", - "gogoproto/gogo.proto" ] [[descriptors]] @@ -50,5 +41,4 @@ prefix = "github.com/containerd/containerd/runtime/v2/runc/options" target = "runtime/v2/runc/options/next.pb.txt" ignore_files = [ "google/protobuf/descriptor.proto", - "gogoproto/gogo.proto" ] diff --git a/vendor/github.com/containerd/containerd/README.md b/vendor/github.com/containerd/containerd/README.md index f876079abb29a..25bcaeebea2ec 100644 --- a/vendor/github.com/containerd/containerd/README.md +++ b/vendor/github.com/containerd/containerd/README.md @@ -7,25 +7,38 @@ [![Go Report Card](https://goreportcard.com/badge/github.com/containerd/containerd)](https://goreportcard.com/report/github.com/containerd/containerd) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/1271/badge)](https://bestpractices.coreinfrastructure.org/projects/1271) -containerd is an industry-standard container runtime with an emphasis on simplicity, robustness and portability. It is available as a daemon for Linux and Windows, which can manage the complete container lifecycle of its host system: image transfer and storage, container execution and supervision, low-level storage and network attachments, etc. +containerd is an industry-standard container runtime with an emphasis on simplicity, robustness, and portability. It is available as a daemon for Linux and Windows, which can manage the complete container lifecycle of its host system: image transfer and storage, container execution and supervision, low-level storage and network attachments, etc. -containerd is a member of CNCF with ['graduated'](https://landscape.cncf.io/selected=containerd) status. +containerd is a member of CNCF with ['graduated'](https://landscape.cncf.io/?selected=containerd) status. containerd is designed to be embedded into a larger system, rather than being used directly by developers or end-users. -![architecture](design/architecture.png) +![architecture](docs/historical/design/architecture.png) -## Now Recruiting +## Announcements + +### Hello Kubernetes v1.24! 
+The containerd project would like to announce containerd [v1.6.4](https://github.com/containerd/containerd/releases/tag/v1.6.4). While other prior releases are supported, this latest release and the containerd [v1.5.11](https://github.com/containerd/containerd/releases/tag/v1.5.11) release are recommended for Kubernetes v1.24. + +We felt it important to announce this, particularly in view of [the dockershim removal from this release of Kubernetes](https://kubernetes.io/blog/2022/05/03/dockershim-historical-context/). + +It should be noted here that moving to CRI integrations has been in the plan for many years. `containerd` began as part of `Docker` and was donated to `CNCF`. `containerd` remains in use today by Docker/moby/buildkit etc., and has many other [adopters](https://github.com/containerd/containerd/blob/main/ADOPTERS.md). `containerd` has a namespace that isolates use of `containerd` from various clients/adopters. The Kubernetes namespace is appropriately named `k8s.io`. The CRI API and `containerd` CRI plugin project has, from the start, been an effort to reduce the impact surface for Kubernetes container runtime integration. If you can't tell, we are excited to see this come to fruition. + +If you have any concerns or questions, we will be here to answer them in [issues, discussions, and/or on slack](#communication). Below you will find information/detail about our [CRI Integration](#cri) implementation. + +For containerd users already on v1.6.0-v1.6.3, there are known issues addressed by [v1.6.4](https://github.com/containerd/containerd/releases/tag/v1.6.4). The issues are primarily related to [CNI setup](https://github.com/kubernetes/website/blob/dev-1.24/content/en/docs/tasks/administer-cluster/migrating-from-dockershim/troubleshooting-cni-plugin-related-errors.md) + +### Now Recruiting We are a large inclusive OSS project that is welcoming help of any kind shape or form: * Documentation help is needed to make the product easier to consume and extend. -* We need OSS community outreach / organizing help to get the word out; manage -and create messaging and educational content; and to help with social media, community forums/groups, and google groups. +* We need OSS community outreach/organizing help to get the word out; manage +and create messaging and educational content; and help with social media, community forums/groups, and google groups. * We are actively inviting new [security advisors](https://github.com/containerd/project/blob/main/GOVERNANCE.md#security-advisors) to join the team. -* New sub-projects are being created, core and non-core that could use additional development help. +* New subprojects are being created, core and non-core that could use additional development help. * Each of the [containerd projects](https://github.com/containerd) has a list of issues currently being worked on or that need help resolving. - - If the issue has not already been assigned to someone, or has not made recent progress and you are interested, please inquire. - - If you are interested in starting with a smaller / beginner level issue, look for issues with an `exp/beginner` tag, for example [containerd/containerd beginner issues.](https://github.com/containerd/containerd/issues?q=is%3Aissue+is%3Aopen+label%3Aexp%2Fbeginner) + - If the issue has not already been assigned to someone or has not made recent progress, and you are interested, please inquire. 
+ - If you are interested in starting with a smaller/beginner-level issue, look for issues with an `exp/beginner` tag, for example [containerd/containerd beginner issues.](https://github.com/containerd/containerd/issues?q=is%3Aissue+is%3Aopen+label%3Aexp%2Fbeginner) ## Getting Started @@ -102,7 +115,7 @@ func main() { ### Namespaces -Namespaces allow multiple consumers to use the same containerd without conflicting with each other. It has the benefit of sharing content but still having separation with containers and images. +Namespaces allow multiple consumers to use the same containerd without conflicting with each other. It has the benefit of sharing content while maintaining separation with containers and images. To set a namespace for requests to the API: @@ -132,7 +145,7 @@ err := client.Push(context, "docker.io/library/redis:latest", image.Target()) ### Containers -In containerd, a container is a metadata object. Resources such as an OCI runtime specification, image, root filesystem, and other metadata can be attached to a container. +In containerd, a container is a metadata object. Resources such as an OCI runtime specification, image, root filesystem, and other metadata can be attached to a container. ```go redis, err := client.NewContainer(context, "redis-master") @@ -141,7 +154,7 @@ defer redis.Delete(context) ### OCI Runtime Specification -containerd fully supports the OCI runtime specification for running containers. We have built in functions to help you generate runtime specifications based on images as well as custom parameters. +containerd fully supports the OCI runtime specification for running containers. We have built-in functions to help you generate runtime specifications based on images as well as custom parameters. You can specify options when creating a container about how to modify the specification. @@ -151,7 +164,7 @@ redis, err := client.NewContainer(context, "redis-master", containerd.WithNewSpe ### Root Filesystems -containerd allows you to use overlay or snapshot filesystems with your containers. It comes with built in support for overlayfs and btrfs. +containerd allows you to use overlay or snapshot filesystems with your containers. It comes with built-in support for overlayfs and btrfs. ```go // pull an image and unpack it into the configured snapshotter @@ -271,7 +284,7 @@ loaded for the user's shell environment. ### CRI -`cri` is a [containerd](https://containerd.io/) plugin implementation of the Kubernetes [container runtime interface (CRI)](https://github.com/kubernetes/cri-api/blob/master/pkg/apis/runtime/v1alpha2/api.proto). With it, you are able to use containerd as the container runtime for a Kubernetes cluster. +`cri` is a [containerd](https://containerd.io/) plugin implementation of the Kubernetes [container runtime interface (CRI)](https://github.com/kubernetes/cri-api/blob/master/pkg/apis/runtime/v1/api.proto). With it, you are able to use containerd as the container runtime for a Kubernetes cluster. 
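The Namespaces, Containers, and OCI Runtime Specification snippets in the README hunks above are fragments of the Go client API; a rough end-to-end sketch of how they fit together (the socket path, namespace name, and image reference below are assumptions, not taken from the vendored docs) could look like:

```go
package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/containerd/oci"
)

func main() {
	// Connect to the daemon socket (path assumed to be the common default).
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Every request carries a namespace; this keeps one consumer's
	// containers and images separate from another's.
	ctx := namespaces.WithNamespace(context.Background(), "example")

	// Pull an image and unpack it into the configured snapshotter.
	image, err := client.Pull(ctx, "docker.io/library/redis:alpine", containerd.WithPullUnpack)
	if err != nil {
		log.Fatal(err)
	}

	// A container is a metadata object; attach a root filesystem snapshot
	// and an OCI runtime spec generated from the image configuration.
	redis, err := client.NewContainer(ctx, "redis-master",
		containerd.WithNewSnapshot("redis-rootfs", image),
		containerd.WithNewSpec(oci.WithImageConfig(image)),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer redis.Delete(ctx, containerd.WithSnapshotCleanup)

	log.Printf("created container %q", redis.ID())
}
```

The namespace attached to the context is what separates this client's containers and images from other consumers, such as the CRI plugin's `k8s.io` namespace mentioned in the announcement above.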
![cri](./docs/cri/cri.png)
@@ -296,7 +309,7 @@ A Kubernetes incubator project, [cri-tools](https://github.com/kubernetes-sigs/c
 #### CRI Guides
 * [Installing with Ansible and Kubeadm](contrib/ansible/README.md)
-* [For Non-Ansible Users, Preforming a Custom Installation Using the Release Tarball and Kubeadm](docs/cri/installation.md)
+* [For Non-Ansible Users, Preforming a Custom Installation Using the Release Tarball and Kubeadm](docs/getting-started.md)
 * [CRI Plugin Testing Guide](./docs/cri/testing.md)
 * [Debugging Pods, Containers, and Images with `crictl`](./docs/cri/crictl.md)
 * [Configuring `cri` Plugins](./docs/cri/config.md)
@@ -304,23 +317,23 @@ A Kubernetes incubator project, [cri-tools](https://github.com/kubernetes-sigs/c
 ### Communication
-For async communication and long running discussions please use issues and pull requests on the github repo.
+For async communication and long-running discussions please use issues and pull requests on the GitHub repo.
 This will be the best place to discuss design and implementation.
-For sync communication catch us in the `#containerd` and `#containerd-dev` slack channels on Cloud Native Computing Foundation's (CNCF) slack - `cloud-native.slack.com`. Everyone is welcome to join and chat. [Get Invite to CNCF slack.](https://slack.cncf.io)
+For sync communication catch us in the `#containerd` and `#containerd-dev` Slack channels on Cloud Native Computing Foundation's (CNCF) Slack - `cloud-native.slack.com`. Everyone is welcome to join and chat. [Get Invite to CNCF Slack.](https://slack.cncf.io)
 ### Security audit
-A third party security audit was performed by Cure53 in 4Q2018; the [full report](docs/SECURITY_AUDIT.pdf) is available in our docs/ directory.
+Security audits for the containerd project are hosted on our website. Please see the [security page at containerd.io](https://containerd.io/security/) for more information.
 ### Reporting security issues
-__If you are reporting a security issue, please reach out discreetly at security@containerd.io__.
+Please follow the instructions at [containerd/project](https://github.com/containerd/project/blob/main/SECURITY.md#reporting-a-vulnerability)
 ## Licenses
 The containerd codebase is released under the [Apache 2.0 license](LICENSE).
-The README.md file, and files in the "docs" folder are licensed under the
+The README.md file and files in the "docs" folder are licensed under the
 Creative Commons Attribution 4.0 International License. You may obtain a
 copy of the license, titled CC-BY-4.0, at http://creativecommons.org/licenses/by/4.0/.
diff --git a/vendor/github.com/containerd/containerd/RELEASES.md b/vendor/github.com/containerd/containerd/RELEASES.md
index 24762487c81c2..a99a04995f94e 100644
--- a/vendor/github.com/containerd/containerd/RELEASES.md
+++ b/vendor/github.com/containerd/containerd/RELEASES.md
@@ -1,7 +1,7 @@
 # Versioning and Release
 This document details the versioning and release plan for containerd. Stability
-is a top goal for this project and we hope that this document and the processes
+is a top goal for this project, and we hope that this document and the processes
 it entails will help to achieve that. It covers the release process, versioning
 numbering, backporting, API stability and support horizons.
@@ -74,14 +74,15 @@ to create the milestone or add an issue or PR to an existing milestone.
 ### Support Horizon
 Support horizons will be defined corresponding to a release branch, identified
-by `<major>.<minor>`.
Release branches will be in one of several states: - __*Next*__: The next planned release branch. -- __*Active*__: The release branch is currently supported and accepting patches. +- __*Active*__: The release is a stable branch which is currently supported and accepting patches. - __*Extended*__: The release branch is only accepting security patches. +- __*LTS*__: The release is a long term stable branch which is currently supported and accepting patches. - __*End of Life*__: The release branch is no longer supported and no new patches will be accepted. -Releases will be supported up to one year after a _minor_ release. This means that +Releases will be supported at least one year after a _minor_ release. This means that we will accept bug reports and backports to release branches until the end of life date. If no new _minor_ release has been made, that release will be considered supported until 6 months after the next _minor_ is released or one year, @@ -89,23 +90,58 @@ whichever is longer. Additionally, releases may have an extended security suppor period after the end of the active period to accept security backports. This timeframe will be decided by maintainers before the end of the active status. +Long term stable (_LTS_) releases will be supported for at least three years after +their initial _minor_ release. These branches will accept bug reports and +backports until the end of life date. They may also accept a wider range of +patches than non-_LTS_ releases to support the longer term maintainability of the +branch, including library dependency, toolchain (including Go) and other version updates +which are needed to ensure each release is built with fully supported dependencies and +remains usable by containerd clients. There should be at least a 6-month overlap between +the end of life of an _LTS_ release and the initial release of a new _LTS_ release. +Up to 6 months before the announced end of life of an _LTS_ branch, the branch may +convert to a regular _Active_ release with stricter backport criteria. 
+ The current state is available in the following tables: -| Release | Status | Start | End of Life | -|---------|-------------|------------------|-------------------| -| [0.0](https://github.com/containerd/containerd/releases/tag/0.0.5) | End of Life | Dec 4, 2015 | - | -| [0.1](https://github.com/containerd/containerd/releases/tag/v0.1.0) | End of Life | Mar 21, 2016 | - | -| [0.2](https://github.com/containerd/containerd/tree/v0.2.x) | End of Life | Apr 21, 2016 | December 5, 2017 | -| [1.0](https://github.com/containerd/containerd/releases/tag/v1.0.3) | End of Life | December 5, 2017 | December 5, 2018 | -| [1.1](https://github.com/containerd/containerd/releases/tag/v1.1.8) | End of Life | April 23, 2018 | October 23, 2019 | -| [1.2](https://github.com/containerd/containerd/releases/tag/v1.2.13) | End of Life | October 24, 2018 | October 15, 2020 | -| [1.3](https://github.com/containerd/containerd/releases/tag/v1.3.10) | End of Life | September 26, 2019 | March 4, 2021 | -| [1.4](https://github.com/containerd/containerd/releases/tag/v1.4.12) | Extended | August 17, 2020 | March 3, 2022 (Extended) | -| [1.5](https://github.com/containerd/containerd/releases/tag/v1.5.9) | Active | May 3, 2021 | October 28, 2022 | -| [1.6](https://github.com/containerd/containerd/releases/tag/v1.6.0) | Active | February 15, 2022 | max(February 15, 2023 or release of 1.7.0 + 6 months) | -| [1.7](https://github.com/containerd/containerd/milestone/42) | Next | TBD | TBD | - -Note that branches and release from before 1.0 may not follow these rules. +| Release | Status | Start | End of Life | +| --------- | ------------- | ------------------ | ------------------- | +| [0.0](https://github.com/containerd/containerd/releases/tag/0.0.5) | End of Life | Dec 4, 2015 | - | +| [0.1](https://github.com/containerd/containerd/releases/tag/v0.1.0) | End of Life | Mar 21, 2016 | - | +| [0.2](https://github.com/containerd/containerd/tree/v0.2.x) | End of Life | Apr 21, 2016 | December 5, 2017 | +| [1.0](https://github.com/containerd/containerd/releases/tag/v1.0.3) | End of Life | December 5, 2017 | December 5, 2018 | +| [1.1](https://github.com/containerd/containerd/releases/tag/v1.1.8) | End of Life | April 23, 2018 | October 23, 2019 | +| [1.2](https://github.com/containerd/containerd/releases/tag/v1.2.13) | End of Life | October 24, 2018 | October 15, 2020 | +| [1.3](https://github.com/containerd/containerd/releases/tag/v1.3.10) | End of Life | September 26, 2019 | March 4, 2021 | +| [1.4](https://github.com/containerd/containerd/releases/tag/v1.4.13) | End of Life | August 17, 2020 | March 3, 2022 | +| [1.5](https://github.com/containerd/containerd/releases/tag/v1.5.17) | End of Life | May 3, 2021 | February 28, 2023 | +| [1.6](https://github.com/containerd/containerd/releases/tag/v1.6.16) | LTS | February 15, 2022 | max(February 15, 2025 or next LTS + 6 months) | +| [1.7](https://github.com/containerd/containerd/milestone/42) | Next | TBD | TBD | + + +### Kubernetes Support + +The Kubernetes version matrix represents the versions of containerd which are +recommended for a Kubernetes release. Any actively supported version of +containerd may receive patches to fix bugs encountered in any version of +Kubernetes, however, our recommendation is based on which versions have been +the most thoroughly tested. See the [Kubernetes test grid](https://testgrid.k8s.io/sig-node-containerd) +for the list of actively tested versions. 
Kubernetes only supports n-3 minor
+release versions and containerd will ensure there is always a supported version
+of containerd for every supported version of Kubernetes.
+
+| Kubernetes Version | containerd Version | CRI Version |
+|--------------------|--------------------|-----------------|
+| 1.20 | 1.5.0+ | v1alpha2 |
+| 1.21 | 1.5.0+ | v1alpha2 |
+| 1.22 | 1.5.5+ | v1alpha2 |
+| 1.23 | 1.6.0+, 1.5.8+ | v1, v1alpha2 |
+| 1.24 | 1.6.4+, 1.5.11+ | v1, v1alpha2 |
+| 1.25 | 1.6.4+, 1.5.11+ | v1, v1alpha2 ** |
+| 1.26 | 1.6.tbd, 1.7.tbd | v1 |
+
+** Note: containerd v1.5.*, v1.6.*, and v1.7.* will support CRI v1 and v1alpha2 through EOL as those releases will continue to support older versions of k8s, cloud providers, and other clients using CRI v1alpha2. CRI v1alpha2 will be deprecated in v1.7 and removed in containerd v2.
+
+Deprecated containerd and kubernetes versions
 | CRI-Containerd Version | Containerd Version | Kubernetes Version | CRI Version |
 |------------------------|--------------------|--------------------|--------------|
@@ -114,20 +150,14 @@ Note that branches and release from before 1.0 may not follow these rules.
 | End-Of-Life | v1.1 (End-Of-Life) | 1.10+ | v1alpha2 |
 | | v1.2 (End-Of-Life) | 1.10+ | v1alpha2 |
 | | v1.3 (End-Of-Life) | 1.12+ | v1alpha2 |
-| | v1.4 | 1.19+ | v1alpha2 |
-| | v1.5 | 1.20+ | v1alpha2 |
-| | v1.6 | 1.23+ | v1, v1alpha2 |
-
-**Note:** The support table above specifies the Kubernetes Version that was supported at time of release of the containerd - cri integration and Kubernetes only supports n-3 minor release versions.
-
-These tables should be updated as part of the release preparation process.
+| | v1.4 (End-of-Life) | 1.19+ | v1alpha2 |
 ### Backporting
 Backports in containerd are community driven. As maintainers, we'll try to ensure that sensible bugfixes make it into _active_ release, but our main focus will be features for the next _minor_ or _major_ release. For the most part,
-this process is straightforward and we are here to help make it as smooth as
+this process is straightforward, and we are here to help make it as smooth as
 possible.
 If there are important fixes that need to be backported, please let us know in
@@ -137,7 +167,10 @@ one of three ways:
 2. Open a PR with cherry-picked change from main.
 3. Open a PR with a ported fix.
-__If you are reporting a security issue, please reach out discreetly at security@containerd.io__.
+__If you are reporting a security issue:__
+
+Please follow the instructions at [containerd/project](https://github.com/containerd/project/blob/main/SECURITY.md#reporting-a-vulnerability)
+
 Remember that backported PRs must follow the versioning guidelines from this
 document.
 Any release that is "active" can accept backports. Opening a backport PR is
@@ -145,7 +178,7 @@ fairly straightforward. The steps differ depending on whether you are pulling
 a fix from main or need to draft a new commit specific to a particular
 branch.
-To cherry pick a straightforward commit from main, simply use the cherry pick
+To cherry-pick a straightforward commit from main, simply use the cherry-pick
 process:
 1. Pick the branch to which you want backported, usually in the format
@@ -162,6 +195,9 @@ process:
 ```console
 $ git cherry-pick -xsS <commit>
 ```
+   (Optional) If other commits exist in the main branch which are related
+   to the cherry-picked commit; eg: fixes to the main PR. It is recommended
+   to cherry-pick those commits also into this same `my-backport-branch`.
 4.
Push the branch and open up a PR against the _release branch_: ``` @@ -241,7 +277,7 @@ Plugins implemented in tree are supported by the containerd community unless exp Out of tree plugins are not supported by the containerd maintainers. Currently, the Windows runtime and snapshot plugins are not stable and not supported. -Please refer to the github milestones for Windows support in a future release. +Please refer to the GitHub milestones for Windows support in a future release. #### Error Codes @@ -253,7 +289,7 @@ new version of the service. If you find that an error code that is required by your application is not well-documented in the protobuf service description or tested explicitly, -please file and issue and we will clarify. +please file an issue and we will clarify. #### Opaque Fields @@ -281,7 +317,7 @@ be a matter of fixing compilation errors and moving from there. The CRI (Container Runtime Interface) GRPC API is used by a Kubernetes kubelet to communicate with a container runtime. This interface is used to manage -container lifecycles and container images. Currently this API is under +container lifecycles and container images. Currently, this API is under development and unstable across Kubernetes releases. Each Kubernetes release only supports a single version of CRI and the CRI plugin only implements a single version of CRI. @@ -294,7 +330,7 @@ version of Kubernetes which supports that version of CRI. The `ctr` tool provides the ability to introspect and understand the containerd API. It is not considered a primary offering of the project and is unsupported in -that sense. While we understand it's value as a debug tool, it may be completely +that sense. While we understand its value as a debug tool, it may be completely refactored or have breaking changes in _minor_ releases. Targeting `ctr` for feature additions reflects a misunderstanding of the containerd @@ -336,10 +372,31 @@ against total impact. 
The deprecated features are shown in the following table: -| Component | Deprecation release | Target release for removal | Recommendation | -|----------------------------------------------------------------------|---------------------|----------------------------|-----------------------------------| -| Runtime V1 API and implementation (`io.containerd.runtime.v1.linux`) | containerd v1.4 | containerd v2.0 | Use `io.containerd.runc.v2` | -| Runc V1 implementation of Runtime V2 (`io.containerd.runc.v1`) | containerd v1.4 | containerd v2.0 | Use `io.containerd.runc.v2` | -| config.toml `version = 1` | containerd v1.5 | containerd v2.0 | Use config.toml `version = 2` | -| Built-in `aufs` snapshotter | containerd v1.5 | containerd v2.0 | Use `overlayfs` snapshotter | -| `cri-containerd-*.tar.gz` release bundles | containerd v1.6 | containerd v2.0 | Use `containerd-*.tar.gz` bundles | +| Component | Deprecation release | Target release for removal | Recommendation | +|----------------------------------------------------------------------------------|---------------------|----------------------------|-----------------------------------| +| Runtime V1 API and implementation (`io.containerd.runtime.v1.linux`) | containerd v1.4 | containerd v2.0 | Use `io.containerd.runc.v2` | +| Runc V1 implementation of Runtime V2 (`io.containerd.runc.v1`) | containerd v1.4 | containerd v2.0 | Use `io.containerd.runc.v2` | +| config.toml `version = 1` | containerd v1.5 | containerd v2.0 | Use config.toml `version = 2` | +| Built-in `aufs` snapshotter | containerd v1.5 | containerd v2.0 | Use `overlayfs` snapshotter | +| `cri-containerd-*.tar.gz` release bundles | containerd v1.6 | containerd v2.0 | Use `containerd-*.tar.gz` bundles | +| Pulling Schema 1 images (`application/vnd.docker.distribution.manifest.v1+json`) | containerd v1.7 | containerd v2.0 | Use Schema 2 or OCI images | +| CRI `v1alpha2` | containerd v1.7 | containerd v2.0 | Use CRI `v1` | + +## Experimental features + +Experimental features are new features added to containerd which do not have the +same stability guarantees as the rest of containerd. An effort is made to avoid +breaking interfaces between versions, but changes to experimental features before +being fully supported is possible. Users can still expect experimental features +to be high quality and are encouraged to use new features to help them stabilize +more quickly. 
+ +| Component | Initial Release | Target Supported Release | +|----------------------------------------------------------------------------------------|-----------------|--------------------------| +| [Sandbox Service](https://github.com/containerd/containerd/pull/6703) | containerd v1.7 | containerd v2.0 | +| [Sandbox CRI Server](https://github.com/containerd/containerd/pull/7228) | containerd v1.7 | containerd v2.0 | +| [Transfer Service](https://github.com/containerd/containerd/pull/7320) | containerd v1.7 | containerd v2.0 | +| [NRI in CRI Support](https://github.com/containerd/containerd/pull/6019) | containerd v1.7 | containerd v2.0 | +| [gRPC Shim](https://github.com/containerd/containerd/pull/8052) | containerd v1.7 | containerd v2.0 | +| [CRI Runtime Specific Snapshotter](https://github.com/containerd/containerd/pull/6899) | containerd v1.7 | containerd v2.0 | +| [CRI Support for User Namespaces](https://github.com/containerd/containerd/pull/7679) | containerd v1.7 | containerd v2.0 | diff --git a/vendor/github.com/containerd/containerd/ROADMAP.md b/vendor/github.com/containerd/containerd/ROADMAP.md index aacd10739266c..cf542e1e45851 100644 --- a/vendor/github.com/containerd/containerd/ROADMAP.md +++ b/vendor/github.com/containerd/containerd/ROADMAP.md @@ -1,7 +1,7 @@ # containerd roadmap containerd uses the issues and milestones to define its roadmap. -`ROADMAP.md` files are common in open source projects but we find they quickly become out of date. +`ROADMAP.md` files are common in open source projects, but we find they quickly become out of date. We opt for an issues and milestone approach that our maintainers and community can keep up-to-date as work is added and completed. ## Issues diff --git a/vendor/github.com/containerd/containerd/SCOPE.md b/vendor/github.com/containerd/containerd/SCOPE.md index aec9da9158b4f..3eb1c55fc2d06 100644 --- a/vendor/github.com/containerd/containerd/SCOPE.md +++ b/vendor/github.com/containerd/containerd/SCOPE.md @@ -31,7 +31,7 @@ Additional implementations will not be accepted into the core repository and sho ## Scope The following table specifies the various components of containerd and general features of container runtimes. -The table specifies whether or not the feature/component is in or out of scope. +The table specifies whether the feature/component is in or out of scope. | Name | Description | In/Out | Reason | |------------------------------|--------------------------------------------------------------------------------------------------------|--------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| diff --git a/vendor/github.com/containerd/containerd/Vagrantfile b/vendor/github.com/containerd/containerd/Vagrantfile index e81bfc2dc30ef..08cbc4a07d0a2 100644 --- a/vendor/github.com/containerd/containerd/Vagrantfile +++ b/vendor/github.com/containerd/containerd/Vagrantfile @@ -17,21 +17,29 @@ # Vagrantfile for Fedora and EL Vagrant.configure("2") do |config| - config.vm.box = ENV["BOX"] || "fedora/37-cloud-base" - config.vm.box_version = ENV["BOX_VERSION"] + config.vm.box = ENV["BOX"] ? ENV["BOX"].split("@")[0] : "fedora/37-cloud-base" + # BOX_VERSION is deprecated. Use "BOX=@". 
+ config.vm.box_version = ENV["BOX_VERSION"] || (ENV["BOX"].split("@")[1] if ENV["BOX"]) + memory = 4096 cpus = 2 - config.vm.provider :virtualbox do |v| + disk_size = 60 + config.vm.provider :virtualbox do |v, o| v.memory = memory v.cpus = cpus + # Needs env var VAGRANT_EXPERIMENTAL="disks" + o.vm.disk :disk, size: "#{disk_size}GB", primary: true end config.vm.provider :libvirt do |v| v.memory = memory v.cpus = cpus + v.machine_virtual_size = disk_size end config.vm.synced_folder ".", "/vagrant", type: "rsync" + config.vm.provision 'shell', path: 'script/resize-vagrant-root.sh' + # Disabled by default. To run: # vagrant up --provision-with=upgrade-packages # To upgrade only specific packages: @@ -93,7 +101,7 @@ EOF config.vm.provision "install-golang", type: "shell", run: "once" do |sh| sh.upload_path = "/tmp/vagrant-install-golang" sh.env = { - 'GO_VERSION': ENV['GO_VERSION'] || "1.19.6", + 'GO_VERSION': ENV['GO_VERSION'] || "1.20.1", } sh.inline = <<~SHELL #!/usr/bin/env bash @@ -199,6 +207,19 @@ EOF SHELL end + config.vm.provision "install-failpoint-binaries", type: "shell", run: "once" do |sh| + sh.upload_path = "/tmp/vagrant-install-failpoint-binaries" + sh.inline = <<~SHELL + #!/usr/bin/env bash + source /etc/environment + source /etc/profile.d/sh.local + set -eux -o pipefail + ${GOPATH}/src/github.com/containerd/containerd/script/setup/install-failpoint-binaries + chcon -v -t container_runtime_exec_t $(type -ap containerd-shim-runc-fp-v1) + containerd-shim-runc-fp-v1 -v + SHELL + end + # SELinux is Enforcing by default. # To set SELinux as Disabled on a VM that has already been provisioned: # SELINUX=Disabled vagrant up --provision-with=selinux @@ -224,6 +245,7 @@ EOF 'RUNC_FLAVOR': ENV['RUNC_FLAVOR'] || "runc", 'GOTEST': ENV['GOTEST'] || "go test", 'GOTESTSUM_JUNITFILE': ENV['GOTESTSUM_JUNITFILE'], + 'GOTESTSUM_JSONFILE': ENV['GOTESTSUM_JSONFILE'], } sh.inline = <<~SHELL #!/usr/bin/env bash @@ -237,6 +259,36 @@ EOF SHELL end + # SELinux is Enforcing by default (via provisioning) in this VM. To re-run with SELinux disabled: + # SELINUX=Disabled vagrant up --provision-with=selinux,test-cri-integration + # + config.vm.provision "test-cri-integration", type: "shell", run: "never" do |sh| + sh.upload_path = "/tmp/test-cri-integration" + sh.env = { + 'GOTEST': ENV['GOTEST'] || "go test", + 'GOTESTSUM_JUNITFILE': ENV['GOTESTSUM_JUNITFILE'], + 'GOTESTSUM_JSONFILE': ENV['GOTESTSUM_JSONFILE'], + 'GITHUB_WORKSPACE': '', + 'ENABLE_CRI_SANDBOXES': ENV['ENABLE_CRI_SANDBOXES'], + } + sh.inline = <<~SHELL + #!/usr/bin/env bash + source /etc/environment + source /etc/profile.d/sh.local + set -eux -o pipefail + cleanup() { + rm -rf /var/lib/containerd* /run/containerd* /tmp/containerd* /tmp/test* /tmp/failpoint* /tmp/nri* + } + cleanup + cd ${GOPATH}/src/github.com/containerd/containerd + # cri-integration.sh executes containerd from ./bin, not from $PATH . + make BUILDTAGS="seccomp selinux no_aufs no_btrfs no_devmapper no_zfs" binaries bin/cri-integration.test + chcon -v -t container_runtime_exec_t ./bin/{containerd,containerd-shim*} + CONTAINERD_RUNTIME=io.containerd.runc.v2 ./script/test/cri-integration.sh + cleanup + SHELL + end + # SELinux is Enforcing by default (via provisioning) in this VM. 
To re-run with SELinux disabled: # SELINUX=Disabled vagrant up --provision-with=selinux,test-cri # diff --git a/vendor/github.com/containerd/containerd/api/events/container.pb.go b/vendor/github.com/containerd/containerd/api/events/container.pb.go index fe002e0736c26..d7d40258c31c3 100644 --- a/vendor/github.com/containerd/containerd/api/events/container.pb.go +++ b/vendor/github.com/containerd/containerd/api/events/container.pb.go @@ -1,1413 +1,431 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/events/container.proto package events import ( - fmt "fmt" - github_com_containerd_typeurl "github.com/containerd/typeurl" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - types "github.com/gogo/protobuf/types" - io "io" - math "math" - math_bits "math/bits" + _ "github.com/containerd/containerd/protobuf/plugin" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" reflect "reflect" - strings "strings" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type ContainerCreate struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Image string `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"` - Runtime *ContainerCreate_Runtime `protobuf:"bytes,3,opt,name=runtime,proto3" json:"runtime,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *ContainerCreate) Reset() { *m = ContainerCreate{} } -func (*ContainerCreate) ProtoMessage() {} -func (*ContainerCreate) Descriptor() ([]byte, []int) { - return fileDescriptor_0d1f05b8626f83ea, []int{0} -} -func (m *ContainerCreate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Image string `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"` + Runtime *ContainerCreate_Runtime `protobuf:"bytes,3,opt,name=runtime,proto3" json:"runtime,omitempty"` } -func (m *ContainerCreate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ContainerCreate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil + +func (x *ContainerCreate) Reset() { + *x = ContainerCreate{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_container_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ContainerCreate) XXX_Merge(src proto.Message) { - xxx_messageInfo_ContainerCreate.Merge(m, src) -} -func (m *ContainerCreate) XXX_Size() int { - return m.Size() -} -func (m *ContainerCreate) XXX_DiscardUnknown() { - xxx_messageInfo_ContainerCreate.DiscardUnknown(m) -} - -var xxx_messageInfo_ContainerCreate proto.InternalMessageInfo -type ContainerCreate_Runtime struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Options *types.Any `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *ContainerCreate) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ContainerCreate_Runtime) Reset() { *m = ContainerCreate_Runtime{} } -func (*ContainerCreate_Runtime) ProtoMessage() {} -func (*ContainerCreate_Runtime) Descriptor() ([]byte, []int) { - return fileDescriptor_0d1f05b8626f83ea, []int{0, 0} -} -func (m *ContainerCreate_Runtime) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ContainerCreate_Runtime) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ContainerCreate_Runtime.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*ContainerCreate) ProtoMessage() {} + +func (x *ContainerCreate) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_container_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *ContainerCreate_Runtime) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_ContainerCreate_Runtime.Merge(m, src) -} -func (m *ContainerCreate_Runtime) XXX_Size() int { - return m.Size() -} -func (m *ContainerCreate_Runtime) XXX_DiscardUnknown() { - xxx_messageInfo_ContainerCreate_Runtime.DiscardUnknown(m) + return mi.MessageOf(x) } -var xxx_messageInfo_ContainerCreate_Runtime proto.InternalMessageInfo - -type ContainerUpdate struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Image string `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"` - Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - SnapshotKey string `protobuf:"bytes,4,opt,name=snapshot_key,json=snapshotKey,proto3" json:"snapshot_key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +// Deprecated: Use ContainerCreate.ProtoReflect.Descriptor instead. +func (*ContainerCreate) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_container_proto_rawDescGZIP(), []int{0} } -func (m *ContainerUpdate) Reset() { *m = ContainerUpdate{} } -func (*ContainerUpdate) ProtoMessage() {} -func (*ContainerUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_0d1f05b8626f83ea, []int{1} -} -func (m *ContainerUpdate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ContainerUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ContainerUpdate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ContainerCreate) GetID() string { + if x != nil { + return x.ID } -} -func (m *ContainerUpdate) XXX_Merge(src proto.Message) { - xxx_messageInfo_ContainerUpdate.Merge(m, src) -} -func (m *ContainerUpdate) XXX_Size() int { - return m.Size() -} -func (m *ContainerUpdate) XXX_DiscardUnknown() { - xxx_messageInfo_ContainerUpdate.DiscardUnknown(m) + return "" } -var xxx_messageInfo_ContainerUpdate proto.InternalMessageInfo - -type ContainerDelete struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *ContainerCreate) GetImage() string { + if x != nil { + return x.Image + } + return "" } -func (m *ContainerDelete) Reset() { *m = ContainerDelete{} } -func (*ContainerDelete) ProtoMessage() {} -func (*ContainerDelete) Descriptor() ([]byte, []int) { - return fileDescriptor_0d1f05b8626f83ea, []int{2} -} -func (m *ContainerDelete) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ContainerDelete) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ContainerDelete.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ContainerCreate) GetRuntime() *ContainerCreate_Runtime { + if x != nil { + return x.Runtime } -} -func (m *ContainerDelete) XXX_Merge(src proto.Message) { - xxx_messageInfo_ContainerDelete.Merge(m, src) -} -func (m *ContainerDelete) XXX_Size() int { - return m.Size() -} -func (m *ContainerDelete) XXX_DiscardUnknown() { - xxx_messageInfo_ContainerDelete.DiscardUnknown(m) + return nil } -var 
xxx_messageInfo_ContainerDelete proto.InternalMessageInfo +type ContainerUpdate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func init() { - proto.RegisterType((*ContainerCreate)(nil), "containerd.events.ContainerCreate") - proto.RegisterType((*ContainerCreate_Runtime)(nil), "containerd.events.ContainerCreate.Runtime") - proto.RegisterType((*ContainerUpdate)(nil), "containerd.events.ContainerUpdate") - proto.RegisterMapType((map[string]string)(nil), "containerd.events.ContainerUpdate.LabelsEntry") - proto.RegisterType((*ContainerDelete)(nil), "containerd.events.ContainerDelete") + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Image string `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + SnapshotKey string `protobuf:"bytes,4,opt,name=snapshot_key,json=snapshotKey,proto3" json:"snapshot_key,omitempty"` } -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/events/container.proto", fileDescriptor_0d1f05b8626f83ea) +func (x *ContainerUpdate) Reset() { + *x = ContainerUpdate{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_container_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -var fileDescriptor_0d1f05b8626f83ea = []byte{ - // 413 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0xc1, 0x0a, 0xd3, 0x30, - 0x18, 0xc7, 0x97, 0x76, 0x6e, 0x98, 0x0a, 0x6a, 0x18, 0x52, 0x7b, 0xa8, 0x73, 0xa7, 0xe9, 0x21, - 0x85, 0x7a, 0x51, 0x77, 0xd1, 0x6d, 0x0a, 0xa2, 0x82, 0x14, 0x84, 0xe1, 0x45, 0xd2, 0x35, 0xeb, - 0x82, 0x6d, 0x52, 0xda, 0x74, 0xd0, 0x9b, 0x8f, 0xe2, 0xe3, 0xec, 0xe8, 0xc1, 0x83, 0x27, 0x71, - 0x05, 0xdf, 0xc0, 0x07, 0x90, 0x26, 0xeb, 0x56, 0x14, 0x95, 0x9d, 0xfa, 0xcf, 0xd7, 0xff, 0x3f, - 0xdf, 0xf7, 0xfb, 0x08, 0x9c, 0xc5, 0x4c, 0x6e, 0xcb, 0x10, 0xaf, 0x45, 0xea, 0xad, 0x05, 0x97, - 0x84, 0x71, 0x9a, 0x47, 0x5d, 0x49, 0x32, 0xe6, 0xd1, 0x1d, 0xe5, 0xb2, 0x38, 0x57, 0x71, 0x96, - 0x0b, 0x29, 0xd0, 0xcd, 0xb3, 0x0d, 0x6b, 0x8b, 0x73, 0x3b, 0x16, 0x22, 0x4e, 0xa8, 0xa7, 0x0c, - 0x61, 0xb9, 0xf1, 0x08, 0xaf, 0xb4, 0xdb, 0x19, 0xc5, 0x22, 0x16, 0x4a, 0x7a, 0x8d, 0x3a, 0x56, - 0x9f, 0xfc, 0x77, 0x80, 0xd3, 0x55, 0x59, 0x52, 0xc6, 0x8c, 0x7b, 0x1b, 0x46, 0x93, 0x28, 0x23, - 0x72, 0xab, 0x6f, 0x98, 0x7c, 0x01, 0xf0, 0xfa, 0xa2, 0xb5, 0x2f, 0x72, 0x4a, 0x24, 0x45, 0xb7, - 0xa0, 0xc1, 0x22, 0x1b, 0x8c, 0xc1, 0xf4, 0xea, 0x7c, 0x50, 0x7f, 0xbb, 0x63, 0xbc, 0x58, 0x06, - 0x06, 0x8b, 0xd0, 0x08, 0x5e, 0x61, 0x29, 0x89, 0xa9, 0x6d, 0x34, 0xbf, 0x02, 0x7d, 0x40, 0x4b, - 0x38, 0xcc, 0x4b, 0x2e, 0x59, 0x4a, 0x6d, 0x73, 0x0c, 0xa6, 0x96, 0x7f, 0x1f, 0xff, 0x41, 0x86, - 0x7f, 0x6b, 0x81, 0x03, 0x9d, 0x08, 0xda, 0xa8, 0xf3, 0x1a, 0x0e, 0x8f, 0x35, 0x84, 0x60, 0x9f, - 0x93, 0x94, 0xea, 0x01, 0x02, 0xa5, 0x11, 0x86, 0x43, 0x91, 0x49, 0x26, 0x78, 0xa1, 0x9a, 0x5b, - 0xfe, 0x08, 0xeb, 0x5d, 0xe1, 0x16, 0x10, 0x3f, 0xe5, 0x55, 0xd0, 0x9a, 0x26, 0x3f, 0xba, 0x58, - 0x6f, 0xb3, 0xe8, 0x72, 0xac, 0xe7, 0x70, 0x90, 0x90, 0x90, 0x26, 0x85, 0x6d, 0x8e, 0xcd, 0xa9, - 0xe5, 0xe3, 0x7f, 0x51, 0xe9, 0x0e, 0xf8, 0x95, 0x0a, 0x3c, 0xe3, 0x32, 0xaf, 0x82, 0x63, 0x1a, - 0xdd, 0x85, 0xd7, 0x0a, 0x4e, 0xb2, 0x62, 0x2b, 0xe4, 0xfb, 0x0f, 0xb4, 0xb2, 0xfb, 
0xaa, 0x89, - 0xd5, 0xd6, 0x5e, 0xd2, 0xca, 0x79, 0x04, 0xad, 0x4e, 0x12, 0xdd, 0x80, 0x66, 0x63, 0xd4, 0xf8, - 0x8d, 0x6c, 0x26, 0xdc, 0x91, 0xa4, 0x3c, 0x4d, 0xa8, 0x0e, 0x8f, 0x8d, 0x87, 0x60, 0x72, 0xaf, - 0x83, 0xb9, 0xa4, 0x09, 0xfd, 0x3b, 0xe6, 0xfc, 0xcd, 0xfe, 0xe0, 0xf6, 0xbe, 0x1e, 0xdc, 0xde, - 0xc7, 0xda, 0x05, 0xfb, 0xda, 0x05, 0x9f, 0x6b, 0x17, 0x7c, 0xaf, 0x5d, 0xf0, 0xe9, 0xa7, 0x0b, - 0xde, 0xf9, 0x17, 0x3c, 0xe5, 0x99, 0xfe, 0xac, 0xc0, 0xca, 0x08, 0x07, 0x6a, 0xff, 0x0f, 0x7e, - 0x05, 0x00, 0x00, 0xff, 0xff, 0xf5, 0x09, 0xe0, 0xd6, 0x0b, 0x03, 0x00, 0x00, +func (x *ContainerUpdate) String() string { + return protoimpl.X.MessageStringOf(x) } -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *ContainerCreate) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } +func (*ContainerUpdate) ProtoMessage() {} - switch fieldpath[0] { - case "id": - return string(m.ID), len(m.ID) > 0 - case "image": - return string(m.Image), len(m.Image) > 0 - case "runtime": - // NOTE(stevvooe): This is probably not correct in many cases. - // We assume that the target message also implements the Field - // method, which isn't likely true in a lot of cases. - // - // If you have a broken build and have found this comment, - // you may be closer to a solution. - if m.Runtime == nil { - return "", false +func (x *ContainerUpdate) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_container_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - - return m.Runtime.Field(fieldpath[1:]) + return ms } - return "", false + return mi.MessageOf(x) } -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *ContainerCreate_Runtime) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "name": - return string(m.Name), len(m.Name) > 0 - case "options": - decoded, err := github_com_containerd_typeurl.UnmarshalAny(m.Options) - if err != nil { - return "", false - } - - adaptor, ok := decoded.(interface{ Field([]string) (string, bool) }) - if !ok { - return "", false - } - return adaptor.Field(fieldpath[1:]) - } - return "", false +// Deprecated: Use ContainerUpdate.ProtoReflect.Descriptor instead. +func (*ContainerUpdate) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_container_proto_rawDescGZIP(), []int{1} } -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *ContainerUpdate) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false +func (x *ContainerUpdate) GetID() string { + if x != nil { + return x.ID } - - switch fieldpath[0] { - case "id": - return string(m.ID), len(m.ID) > 0 - case "image": - return string(m.Image), len(m.Image) > 0 - case "labels": - // Labels fields have been special-cased by name. If this breaks, - // add better special casing to fieldpath plugin. 
- if len(m.Labels) == 0 { - return "", false - } - value, ok := m.Labels[strings.Join(fieldpath[1:], ".")] - return value, ok - case "snapshot_key": - return string(m.SnapshotKey), len(m.SnapshotKey) > 0 - } - return "", false + return "" } -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *ContainerDelete) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "id": - return string(m.ID), len(m.ID) > 0 +func (x *ContainerUpdate) GetImage() string { + if x != nil { + return x.Image } - return "", false -} -func (m *ContainerCreate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ContainerCreate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return "" } -func (m *ContainerCreate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Runtime != nil { - { - size, err := m.Runtime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContainer(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if len(m.Image) > 0 { - i -= len(m.Image) - copy(dAtA[i:], m.Image) - i = encodeVarintContainer(dAtA, i, uint64(len(m.Image))) - i-- - dAtA[i] = 0x12 +func (x *ContainerUpdate) GetLabels() map[string]string { + if x != nil { + return x.Labels } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintContainer(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return nil } -func (m *ContainerCreate_Runtime) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *ContainerUpdate) GetSnapshotKey() string { + if x != nil { + return x.SnapshotKey } - return dAtA[:n], nil + return "" } -func (m *ContainerCreate_Runtime) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} +type ContainerDelete struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *ContainerCreate_Runtime) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Options != nil { - { - size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContainer(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintContainer(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` } -func (m *ContainerUpdate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *ContainerDelete) Reset() { + *x = ContainerDelete{} + if protoimpl.UnsafeEnabled { + mi := 
&file_github_com_containerd_containerd_api_events_container_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *ContainerUpdate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *ContainerDelete) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ContainerUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.SnapshotKey) > 0 { - i -= len(m.SnapshotKey) - copy(dAtA[i:], m.SnapshotKey) - i = encodeVarintContainer(dAtA, i, uint64(len(m.SnapshotKey))) - i-- - dAtA[i] = 0x22 - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintContainer(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintContainer(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintContainer(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1a - } - } - if len(m.Image) > 0 { - i -= len(m.Image) - copy(dAtA[i:], m.Image) - i = encodeVarintContainer(dAtA, i, uint64(len(m.Image))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintContainer(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} +func (*ContainerDelete) ProtoMessage() {} -func (m *ContainerDelete) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *ContainerDelete) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_container_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *ContainerDelete) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use ContainerDelete.ProtoReflect.Descriptor instead. 
+func (*ContainerDelete) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_container_proto_rawDescGZIP(), []int{2} } -func (m *ContainerDelete) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintContainer(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa +func (x *ContainerDelete) GetID() string { + if x != nil { + return x.ID } - return len(dAtA) - i, nil + return "" } -func encodeVarintContainer(dAtA []byte, offset int, v uint64) int { - offset -= sovContainer(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ContainerCreate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovContainer(uint64(l)) - } - l = len(m.Image) - if l > 0 { - n += 1 + l + sovContainer(uint64(l)) - } - if m.Runtime != nil { - l = m.Runtime.Size() - n += 1 + l + sovContainer(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} +type ContainerCreate_Runtime struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *ContainerCreate_Runtime) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovContainer(uint64(l)) - } - if m.Options != nil { - l = m.Options.Size() - n += 1 + l + sovContainer(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Options *anypb.Any `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` } -func (m *ContainerUpdate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovContainer(uint64(l)) - } - l = len(m.Image) - if l > 0 { - n += 1 + l + sovContainer(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovContainer(uint64(len(k))) + 1 + len(v) + sovContainer(uint64(len(v))) - n += mapEntrySize + 1 + sovContainer(uint64(mapEntrySize)) - } - } - l = len(m.SnapshotKey) - if l > 0 { - n += 1 + l + sovContainer(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) +func (x *ContainerCreate_Runtime) Reset() { + *x = ContainerCreate_Runtime{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_container_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return n } -func (m *ContainerDelete) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovContainer(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n +func (x *ContainerCreate_Runtime) String() string { + return protoimpl.X.MessageStringOf(x) } -func sovContainer(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozContainer(x uint64) (n int) { - return sovContainer(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ContainerCreate) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ContainerCreate{`, - `ID:` + 
fmt.Sprintf("%v", this.ID) + `,`, - `Image:` + fmt.Sprintf("%v", this.Image) + `,`, - `Runtime:` + strings.Replace(fmt.Sprintf("%v", this.Runtime), "ContainerCreate_Runtime", "ContainerCreate_Runtime", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerCreate_Runtime) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ContainerCreate_Runtime{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "types.Any", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerUpdate) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&ContainerUpdate{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Image:` + fmt.Sprintf("%v", this.Image) + `,`, - `Labels:` + mapStringForLabels + `,`, - `SnapshotKey:` + fmt.Sprintf("%v", this.SnapshotKey) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerDelete) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ContainerDelete{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringContainer(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *ContainerCreate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ContainerCreate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerCreate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContainer - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContainer - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 
{ - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContainer - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContainer - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Image = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Runtime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContainer - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContainer - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Runtime == nil { - m.Runtime = &ContainerCreate_Runtime{} - } - if err := m.Runtime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContainer(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContainer - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } +func (*ContainerCreate_Runtime) ProtoMessage() {} - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ContainerCreate_Runtime) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Runtime: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Runtime: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContainer - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContainer - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContainer - } - postIndex := iNdEx + msglen - if 
postIndex < 0 { - return ErrInvalidLengthContainer - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Options == nil { - m.Options = &types.Any{} - } - if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContainer(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContainer - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy +func (x *ContainerCreate_Runtime) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_container_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } + return mi.MessageOf(x) +} - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil +// Deprecated: Use ContainerCreate_Runtime.ProtoReflect.Descriptor instead. +func (*ContainerCreate_Runtime) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_container_proto_rawDescGZIP(), []int{0, 0} } -func (m *ContainerUpdate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ContainerUpdate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerUpdate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContainer - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContainer - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContainer - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContainer - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Image = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContainer - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContainer - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthContainer - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthContainer - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthContainer - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthContainer - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipContainer(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContainer - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SnapshotKey", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContainer - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContainer - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SnapshotKey = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContainer(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContainer - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = 
append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *ContainerCreate_Runtime) GetName() string { + if x != nil { + return x.Name } - return nil + return "" } -func (m *ContainerDelete) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ContainerDelete: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerDelete: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContainer - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContainer - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContainer - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContainer(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContainer - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *ContainerCreate_Runtime) GetOptions() *anypb.Any { + if x != nil { + return x.Options } return nil } -func skipContainer(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowContainer - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowContainer - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowContainer - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthContainer - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupContainer - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthContainer - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF + +var File_github_com_containerd_containerd_api_events_container_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_events_container_proto_rawDesc = []byte{ + 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x40, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcc, 0x01, + 0x0a, 0x0f, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, + 0x64, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x07, 0x72, 0x75, 0x6e, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x52, 0x07, 0x72, 
0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x4d, 0x0a, + 0x07, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x41, 0x6e, 0x79, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xdd, 0x01, 0x0a, + 0x0f, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, + 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x46, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x21, + 0x0a, 0x0c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4b, 0x65, + 0x79, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x21, 0x0a, 0x0f, + 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x42, + 0x38, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x3b, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x73, 0xa0, 0xf4, 0x1e, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( - ErrInvalidLengthContainer = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowContainer = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupContainer = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_events_container_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_events_container_proto_rawDescData = file_github_com_containerd_containerd_api_events_container_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_events_container_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_events_container_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_events_container_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_events_container_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_events_container_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_events_container_proto_msgTypes = 
make([]protoimpl.MessageInfo, 5) +var file_github_com_containerd_containerd_api_events_container_proto_goTypes = []interface{}{ + (*ContainerCreate)(nil), // 0: containerd.events.ContainerCreate + (*ContainerUpdate)(nil), // 1: containerd.events.ContainerUpdate + (*ContainerDelete)(nil), // 2: containerd.events.ContainerDelete + (*ContainerCreate_Runtime)(nil), // 3: containerd.events.ContainerCreate.Runtime + nil, // 4: containerd.events.ContainerUpdate.LabelsEntry + (*anypb.Any)(nil), // 5: google.protobuf.Any +} +var file_github_com_containerd_containerd_api_events_container_proto_depIdxs = []int32{ + 3, // 0: containerd.events.ContainerCreate.runtime:type_name -> containerd.events.ContainerCreate.Runtime + 4, // 1: containerd.events.ContainerUpdate.labels:type_name -> containerd.events.ContainerUpdate.LabelsEntry + 5, // 2: containerd.events.ContainerCreate.Runtime.options:type_name -> google.protobuf.Any + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_events_container_proto_init() } +func file_github_com_containerd_containerd_api_events_container_proto_init() { + if File_github_com_containerd_containerd_api_events_container_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_events_container_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ContainerCreate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_container_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ContainerUpdate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_container_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ContainerDelete); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_container_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ContainerCreate_Runtime); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_events_container_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_events_container_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_events_container_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_events_container_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_events_container_proto = out.File + file_github_com_containerd_containerd_api_events_container_proto_rawDesc = nil + file_github_com_containerd_containerd_api_events_container_proto_goTypes = nil + 
file_github_com_containerd_containerd_api_events_container_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/events/container.proto b/vendor/github.com/containerd/containerd/api/events/container.proto index dfeca308ea314..29fd18f88dde2 100644 --- a/vendor/github.com/containerd/containerd/api/events/container.proto +++ b/vendor/github.com/containerd/containerd/api/events/container.proto @@ -19,8 +19,7 @@ syntax = "proto3"; package containerd.events; import "google/protobuf/any.proto"; -import weak "gogoproto/gogo.proto"; -import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; +import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.plugin.fieldpath_all) = true; diff --git a/vendor/github.com/containerd/containerd/api/events/container_fieldpath.pb.go b/vendor/github.com/containerd/containerd/api/events/container_fieldpath.pb.go new file mode 100644 index 0000000000000..c23b07762e3fe --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/events/container_fieldpath.pb.go @@ -0,0 +1,95 @@ +// Code generated by protoc-gen-go-fieldpath. DO NOT EDIT. +// source: github.com/containerd/containerd/api/events/container.proto +package events + +import ( + v2 "github.com/containerd/typeurl/v2" + strings "strings" +) + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *ContainerCreate) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "id": + return string(m.ID), len(m.ID) > 0 + case "image": + return string(m.Image), len(m.Image) > 0 + case "runtime": + // NOTE(stevvooe): This is probably not correct in many cases. + // We assume that the target message also implements the Field + // method, which isn't likely true in a lot of cases. + // + // If you have a broken build and have found this comment, + // you may be closer to a solution. + if m.Runtime == nil { + return "", false + } + return m.Runtime.Field(fieldpath[1:]) + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *ContainerCreate_Runtime) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "name": + return string(m.Name), len(m.Name) > 0 + case "options": + decoded, err := v2.UnmarshalAny(m.Options) + if err != nil { + return "", false + } + adaptor, ok := decoded.(interface{ Field([]string) (string, bool) }) + if !ok { + return "", false + } + return adaptor.Field(fieldpath[1:]) + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *ContainerUpdate) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "id": + return string(m.ID), len(m.ID) > 0 + case "image": + return string(m.Image), len(m.Image) > 0 + case "labels": + // Labels fields have been special-cased by name. If this breaks, + // add better special casing to fieldpath plugin. 
+ if len(m.Labels) == 0 { + return "", false + } + value, ok := m.Labels[strings.Join(fieldpath[1:], ".")] + return value, ok + case "snapshot_key": + return string(m.SnapshotKey), len(m.SnapshotKey) > 0 + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *ContainerDelete) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "id": + return string(m.ID), len(m.ID) > 0 + } + return "", false +} diff --git a/vendor/github.com/containerd/containerd/api/events/content.pb.go b/vendor/github.com/containerd/containerd/api/events/content.pb.go index 0a7ec9325d351..9e3f843e2bff4 100644 --- a/vendor/github.com/containerd/containerd/api/events/content.pb.go +++ b/vendor/github.com/containerd/containerd/api/events/content.pb.go @@ -1,359 +1,168 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/events/content.proto package events import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" - io "io" - math "math" - math_bits "math/bits" + _ "github.com/containerd/containerd/protobuf/plugin" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - strings "strings" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type ContentDelete struct { - Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ContentDelete) Reset() { *m = ContentDelete{} } -func (*ContentDelete) ProtoMessage() {} -func (*ContentDelete) Descriptor() ([]byte, []int) { - return fileDescriptor_dfb34b8b808e2ecd, []int{0} -} -func (m *ContentDelete) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ContentDelete) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ContentDelete.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ContentDelete) XXX_Merge(src proto.Message) { - xxx_messageInfo_ContentDelete.Merge(m, src) -} -func (m *ContentDelete) XXX_Size() int { - return m.Size() -} -func (m *ContentDelete) XXX_DiscardUnknown() { - xxx_messageInfo_ContentDelete.DiscardUnknown(m) -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -var xxx_messageInfo_ContentDelete proto.InternalMessageInfo - -func init() { - proto.RegisterType((*ContentDelete)(nil), "containerd.events.ContentDelete") + Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"` } -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/events/content.proto", fileDescriptor_dfb34b8b808e2ecd) +func (x *ContentDelete) Reset() { + *x = ContentDelete{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_content_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -var fileDescriptor_dfb34b8b808e2ecd = []byte{ - // 228 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, - 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0x83, 0x45, 0x53, - 0xf3, 0x4a, 0xf4, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0x04, 0x11, 0x8a, 0xf4, 0x20, 0x0a, 0xa4, - 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0xb2, 0xfa, 0x20, 0x16, 0x44, 0xa1, 0x94, 0x03, 0x41, 0x3b, - 0xc0, 0xea, 0x92, 0x4a, 0xd3, 0xf4, 0x0b, 0x72, 0x4a, 0xd3, 0x33, 0xf3, 0xf4, 0xd3, 0x32, 0x53, - 0x73, 0x52, 0x0a, 0x12, 0x4b, 0x32, 0x20, 0x26, 0x28, 0x45, 0x73, 0xf1, 0x3a, 0x43, 0xec, 0x76, - 0x49, 0xcd, 0x49, 0x2d, 0x49, 0x15, 0xf2, 0xe2, 0x62, 0x4b, 0xc9, 0x4c, 0x4f, 0x2d, 0x2e, 0x91, - 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x74, 0x32, 0x3a, 0x71, 0x4f, 0x9e, 0xe1, 0xd6, 0x3d, 0x79, 0x2d, - 0x24, 0xab, 0xf2, 0x0b, 0x52, 0xf3, 0xe0, 0x76, 0x14, 0xeb, 0xa7, 0xe7, 0xeb, 0x42, 0xb4, 0xe8, - 0xb9, 0x80, 0xa9, 0x20, 0xa8, 0x09, 0x4e, 0x01, 0x27, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, - 0xd0, 0xf0, 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, - 0x63, 0x5c, 0xf0, 0x45, 0x8e, 0x31, 0xca, 0x88, 0x84, 0x00, 0xb2, 0x86, 0x50, 0x11, 0x0c, 0x11, - 0x8c, 0x49, 0x6c, 0x60, 0x97, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x4b, 0x78, 0x99, 0xee, - 0x61, 0x01, 0x00, 0x00, +func (x *ContentDelete) String() string { + return protoimpl.X.MessageStringOf(x) } 
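// Editorial note (not part of the generated file or of this patch): with the
// protoc-gen-go regeneration above, ContentDelete.Digest is now a plain string
// rather than the typed github.com/opencontainers/go-digest Digest, so callers
// that need the typed value convert explicitly. A minimal sketch of that
// pattern follows; the event value is an illustrative placeholder and is not
// taken from this diff:
//
//	package main
//
//	import (
//		"fmt"
//
//		"github.com/containerd/containerd/api/events"
//		"github.com/opencontainers/go-digest"
//	)
//
//	func main() {
//		// Placeholder digest; any well-formed "algorithm:encoded" value works here.
//		e := &events.ContentDelete{
//			Digest: "sha256:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
//		}
//
//		d := digest.Digest(e.Digest) // explicit conversion from the string field
//		if err := d.Validate(); err != nil {
//			fmt.Println("invalid digest in event:", err)
//			return
//		}
//		fmt.Println("algorithm:", d.Algorithm(), "encoded:", d.Encoded())
//	}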
-// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *ContentDelete) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } +func (*ContentDelete) ProtoMessage() {} - switch fieldpath[0] { - case "digest": - return string(m.Digest), len(m.Digest) > 0 - } - return "", false -} -func (m *ContentDelete) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *ContentDelete) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_content_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *ContentDelete) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use ContentDelete.ProtoReflect.Descriptor instead. +func (*ContentDelete) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_content_proto_rawDescGZIP(), []int{0} } -func (m *ContentDelete) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) +func (x *ContentDelete) GetDigest() string { + if x != nil { + return x.Digest } - if len(m.Digest) > 0 { - i -= len(m.Digest) - copy(dAtA[i:], m.Digest) - i = encodeVarintContent(dAtA, i, uint64(len(m.Digest))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return "" +} + +var File_github_com_containerd_containerd_api_events_content_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_events_content_proto_rawDesc = []byte{ + 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x40, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x27, 0x0a, 0x0d, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x42, 0x38, 0x5a, 0x32, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x3b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0xa0, + 0xf4, 0x1e, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 
} -func encodeVarintContent(dAtA []byte, offset int, v uint64) int { - offset -= sovContent(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ContentDelete) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Digest) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} +var ( + file_github_com_containerd_containerd_api_events_content_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_events_content_proto_rawDescData = file_github_com_containerd_containerd_api_events_content_proto_rawDesc +) -func sovContent(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozContent(x uint64) (n int) { - return sovContent(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +func file_github_com_containerd_containerd_api_events_content_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_events_content_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_events_content_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_events_content_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_events_content_proto_rawDescData } -func (this *ContentDelete) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ContentDelete{`, - `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s + +var file_github_com_containerd_containerd_api_events_content_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_github_com_containerd_containerd_api_events_content_proto_goTypes = []interface{}{ + (*ContentDelete)(nil), // 0: containerd.events.ContentDelete } -func valueToStringContent(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) +var file_github_com_containerd_containerd_api_events_content_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -func (m *ContentDelete) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ContentDelete: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ContentDelete: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - 
if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func init() { file_github_com_containerd_containerd_api_events_content_proto_init() } +func file_github_com_containerd_containerd_api_events_content_proto_init() { + if File_github_com_containerd_containerd_api_events_content_proto != nil { + return } - return nil -} -func skipContent(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowContent - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowContent - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_events_content_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ContentDelete); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowContent - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthContent - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupContent - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthContent - } - if depth == 0 { - return iNdEx, nil } } - return 0, io.ErrUnexpectedEOF + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_events_content_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_events_content_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_events_content_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_events_content_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_events_content_proto = out.File + file_github_com_containerd_containerd_api_events_content_proto_rawDesc = nil + file_github_com_containerd_containerd_api_events_content_proto_goTypes = nil + file_github_com_containerd_containerd_api_events_content_proto_depIdxs = nil } - -var ( - 
ErrInvalidLengthContent = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowContent = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupContent = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/containerd/containerd/api/events/content.proto b/vendor/github.com/containerd/containerd/api/events/content.proto index b8f84bc89b736..97d4241391476 100644 --- a/vendor/github.com/containerd/containerd/api/events/content.proto +++ b/vendor/github.com/containerd/containerd/api/events/content.proto @@ -18,12 +18,11 @@ syntax = "proto3"; package containerd.events; -import weak "gogoproto/gogo.proto"; -import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; +import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.plugin.fieldpath_all) = true; message ContentDelete { - string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + string digest = 1; } diff --git a/vendor/github.com/containerd/containerd/api/events/content_fieldpath.pb.go b/vendor/github.com/containerd/containerd/api/events/content_fieldpath.pb.go new file mode 100644 index 0000000000000..9485b664c19c7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/events/content_fieldpath.pb.go @@ -0,0 +1,16 @@ +// Code generated by protoc-gen-go-fieldpath. DO NOT EDIT. +// source: github.com/containerd/containerd/api/events/content.proto +package events + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *ContentDelete) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "digest": + return string(m.Digest), len(m.Digest) > 0 + } + return "", false +} diff --git a/vendor/github.com/containerd/containerd/api/events/image.pb.go b/vendor/github.com/containerd/containerd/api/events/image.pb.go index 747026945449d..111af29e6c700 100644 --- a/vendor/github.com/containerd/containerd/api/events/image.pb.go +++ b/vendor/github.com/containerd/containerd/api/events/image.pb.go @@ -1,1109 +1,330 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/events/image.proto package events import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - io "io" - math "math" - math_bits "math/bits" + _ "github.com/containerd/containerd/protobuf/plugin" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - strings "strings" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type ImageCreate struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *ImageCreate) Reset() { *m = ImageCreate{} } -func (*ImageCreate) ProtoMessage() {} -func (*ImageCreate) Descriptor() ([]byte, []int) { - return fileDescriptor_7085610f7b33e042, []int{0} -} -func (m *ImageCreate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *ImageCreate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ImageCreate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil + +func (x *ImageCreate) Reset() { + *x = ImageCreate{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_image_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ImageCreate) XXX_Merge(src proto.Message) { - xxx_messageInfo_ImageCreate.Merge(m, src) -} -func (m *ImageCreate) XXX_Size() int { - return m.Size() -} -func (m *ImageCreate) XXX_DiscardUnknown() { - xxx_messageInfo_ImageCreate.DiscardUnknown(m) -} - -var xxx_messageInfo_ImageCreate proto.InternalMessageInfo -type ImageUpdate struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte 
`json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *ImageCreate) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ImageUpdate) Reset() { *m = ImageUpdate{} } -func (*ImageUpdate) ProtoMessage() {} -func (*ImageUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_7085610f7b33e042, []int{1} -} -func (m *ImageUpdate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ImageUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ImageUpdate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*ImageCreate) ProtoMessage() {} + +func (x *ImageCreate) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_image_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *ImageUpdate) XXX_Merge(src proto.Message) { - xxx_messageInfo_ImageUpdate.Merge(m, src) -} -func (m *ImageUpdate) XXX_Size() int { - return m.Size() -} -func (m *ImageUpdate) XXX_DiscardUnknown() { - xxx_messageInfo_ImageUpdate.DiscardUnknown(m) -} - -var xxx_messageInfo_ImageUpdate proto.InternalMessageInfo -type ImageDelete struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +// Deprecated: Use ImageCreate.ProtoReflect.Descriptor instead. +func (*ImageCreate) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_image_proto_rawDescGZIP(), []int{0} } -func (m *ImageDelete) Reset() { *m = ImageDelete{} } -func (*ImageDelete) ProtoMessage() {} -func (*ImageDelete) Descriptor() ([]byte, []int) { - return fileDescriptor_7085610f7b33e042, []int{2} -} -func (m *ImageDelete) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ImageDelete) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ImageDelete.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ImageCreate) GetName() string { + if x != nil { + return x.Name } + return "" } -func (m *ImageDelete) XXX_Merge(src proto.Message) { - xxx_messageInfo_ImageDelete.Merge(m, src) -} -func (m *ImageDelete) XXX_Size() int { - return m.Size() -} -func (m *ImageDelete) XXX_DiscardUnknown() { - xxx_messageInfo_ImageDelete.DiscardUnknown(m) -} - -var xxx_messageInfo_ImageDelete proto.InternalMessageInfo -func init() { - proto.RegisterType((*ImageCreate)(nil), "containerd.services.images.v1.ImageCreate") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.images.v1.ImageCreate.LabelsEntry") - proto.RegisterType((*ImageUpdate)(nil), "containerd.services.images.v1.ImageUpdate") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.images.v1.ImageUpdate.LabelsEntry") - proto.RegisterType((*ImageDelete)(nil), "containerd.services.images.v1.ImageDelete") +func (x *ImageCreate) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil } -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/events/image.proto", fileDescriptor_7085610f7b33e042) -} 
+type ImageUpdate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -var fileDescriptor_7085610f7b33e042 = []byte{ - // 292 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4f, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, - 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x67, 0xe6, - 0x26, 0xa6, 0xa7, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0x94, 0xe8, 0x15, 0xa7, - 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb, 0x81, 0x15, 0x14, 0xeb, 0x95, 0x19, 0x4a, 0x39, 0x10, - 0x34, 0x17, 0x6c, 0x4c, 0x52, 0x69, 0x9a, 0x7e, 0x41, 0x4e, 0x69, 0x7a, 0x66, 0x9e, 0x7e, 0x5a, - 0x66, 0x6a, 0x4e, 0x4a, 0x41, 0x62, 0x49, 0x06, 0xc4, 0x02, 0xa5, 0x35, 0x8c, 0x5c, 0xdc, 0x9e, - 0x20, 0xf3, 0x9c, 0x8b, 0x52, 0x13, 0x4b, 0x52, 0x85, 0x84, 0xb8, 0x58, 0xf2, 0x12, 0x73, 0x53, - 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0xc0, 0x6c, 0x21, 0x3f, 0x2e, 0xb6, 0x9c, 0xc4, 0xa4, - 0xd4, 0x9c, 0x62, 0x09, 0x26, 0x05, 0x66, 0x0d, 0x6e, 0x23, 0x33, 0x3d, 0xbc, 0xae, 0xd2, 0x43, - 0x32, 0x4f, 0xcf, 0x07, 0xac, 0xd1, 0x35, 0xaf, 0xa4, 0xa8, 0x32, 0x08, 0x6a, 0x8a, 0x94, 0x25, - 0x17, 0x37, 0x92, 0xb0, 0x90, 0x00, 0x17, 0x73, 0x76, 0x6a, 0x25, 0xd4, 0x46, 0x10, 0x53, 0x48, - 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, 0x34, 0x55, 0x82, 0x09, 0x2c, 0x06, 0xe1, 0x58, 0x31, 0x59, - 0x30, 0x22, 0x9c, 0x1b, 0x5a, 0x90, 0x42, 0x55, 0xe7, 0x42, 0xcc, 0xa3, 0xb6, 0x73, 0x15, 0xa1, - 0xae, 0x75, 0x49, 0xcd, 0x49, 0xc5, 0xee, 0x5a, 0xa7, 0x80, 0x13, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, - 0x94, 0x63, 0x68, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, - 0x1e, 0xc9, 0x31, 0x2e, 0xf8, 0x22, 0xc7, 0x18, 0x65, 0x44, 0x42, 0xc2, 0xb1, 0x86, 0x50, 0x11, - 0x0c, 0x49, 0x6c, 0xe0, 0xb8, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x41, 0x80, 0x92, 0x17, - 0x77, 0x02, 0x00, 0x00, + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *ImageCreate) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false +func (x *ImageUpdate) Reset() { + *x = ImageUpdate{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_image_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - - switch fieldpath[0] { - case "name": - return string(m.Name), len(m.Name) > 0 - case "labels": - // Labels fields have been special-cased by name. If this breaks, - // add better special casing to fieldpath plugin. - if len(m.Labels) == 0 { - return "", false - } - value, ok := m.Labels[strings.Join(fieldpath[1:], ".")] - return value, ok - } - return "", false } -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. 
-func (m *ImageUpdate) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "name": - return string(m.Name), len(m.Name) > 0 - case "labels": - // Labels fields have been special-cased by name. If this breaks, - // add better special casing to fieldpath plugin. - if len(m.Labels) == 0 { - return "", false - } - value, ok := m.Labels[strings.Join(fieldpath[1:], ".")] - return value, ok - } - return "", false +func (x *ImageUpdate) String() string { + return protoimpl.X.MessageStringOf(x) } -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *ImageDelete) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } +func (*ImageUpdate) ProtoMessage() {} - switch fieldpath[0] { - case "name": - return string(m.Name), len(m.Name) > 0 - } - return "", false -} -func (m *ImageCreate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *ImageUpdate) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_image_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *ImageCreate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use ImageUpdate.ProtoReflect.Descriptor instead. +func (*ImageUpdate) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_image_proto_rawDescGZIP(), []int{1} } -func (m *ImageCreate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) +func (x *ImageUpdate) GetName() string { + if x != nil { + return x.Name } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintImage(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintImage(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintImage(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintImage(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return "" } -func (m *ImageUpdate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *ImageUpdate) GetLabels() map[string]string { + if x != nil { + return x.Labels } - return dAtA[:n], nil + return nil } -func (m *ImageUpdate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} +type ImageDelete struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *ImageUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Labels) > 0 
{ - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintImage(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintImage(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintImage(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintImage(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } -func (m *ImageDelete) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *ImageDelete) Reset() { + *x = ImageDelete{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_image_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *ImageDelete) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *ImageDelete) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ImageDelete) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintImage(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintImage(dAtA []byte, offset int, v uint64) int { - offset -= sovImage(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ImageCreate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovImage(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovImage(uint64(len(k))) + 1 + len(v) + sovImage(uint64(len(v))) - n += mapEntrySize + 1 + sovImage(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} +func (*ImageDelete) ProtoMessage() {} -func (m *ImageUpdate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovImage(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovImage(uint64(len(k))) + 1 + len(v) + sovImage(uint64(len(v))) - n += mapEntrySize + 1 + sovImage(uint64(mapEntrySize)) +func (x *ImageDelete) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_image_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ImageDelete) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovImage(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n + return mi.MessageOf(x) } -func sovImage(x uint64) (n int) { - return (math_bits.Len64(x|1) 
+ 6) / 7 -} -func sozImage(x uint64) (n int) { - return sovImage(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ImageCreate) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&ImageCreate{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ImageUpdate) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&ImageUpdate{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ImageDelete) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ImageDelete{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringImage(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) +// Deprecated: Use ImageDelete.ProtoReflect.Descriptor instead. 
+func (*ImageDelete) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_image_proto_rawDescGZIP(), []int{2} +} + +func (x *ImageDelete) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +var File_github_com_containerd_containerd_api_events_image_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_events_image_proto_rawDesc = []byte{ + 0x0a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x69, 0x6d, + 0x61, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, + 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x70, 0x61, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xac, 0x01, 0x0a, 0x0b, 0x49, + 0x6d, 0x61, 0x67, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4e, + 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, + 0x6d, 0x61, 0x67, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, + 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xac, 0x01, 0x0a, 0x0b, 0x49, 0x6d, + 0x61, 0x67, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4e, 0x0a, + 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, + 0x61, 0x67, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, + 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x21, 0x0a, 0x0b, 0x49, 0x6d, 
0x61, 0x67, + 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x42, 0x38, 0x5a, 0x32, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x3b, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x73, 0xa0, 0xf4, 0x1e, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func (m *ImageCreate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ImageCreate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageCreate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthImage - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthImage - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthImage - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthImage - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthImage - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthImage - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = 
string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthImage - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthImage - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipImage(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImage - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipImage(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImage - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ImageUpdate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ImageUpdate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageUpdate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthImage - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthImage - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthImage - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthImage - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for 
iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthImage - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthImage - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthImage - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthImage - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipImage(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImage - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipImage(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImage - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } +var ( + file_github_com_containerd_containerd_api_events_image_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_events_image_proto_rawDescData = file_github_com_containerd_containerd_api_events_image_proto_rawDesc +) - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ImageDelete) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ImageDelete: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageDelete: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) +func file_github_com_containerd_containerd_api_events_image_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_events_image_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_events_image_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_events_image_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_events_image_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_events_image_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_github_com_containerd_containerd_api_events_image_proto_goTypes = []interface{}{ + (*ImageCreate)(nil), // 0: containerd.services.images.v1.ImageCreate + (*ImageUpdate)(nil), // 1: containerd.services.images.v1.ImageUpdate + (*ImageDelete)(nil), // 2: containerd.services.images.v1.ImageDelete + nil, // 3: containerd.services.images.v1.ImageCreate.LabelsEntry + nil, // 4: containerd.services.images.v1.ImageUpdate.LabelsEntry +} +var file_github_com_containerd_containerd_api_events_image_proto_depIdxs = []int32{ + 3, // 0: containerd.services.images.v1.ImageCreate.labels:type_name -> containerd.services.images.v1.ImageCreate.LabelsEntry + 4, // 1: containerd.services.images.v1.ImageUpdate.labels:type_name -> containerd.services.images.v1.ImageUpdate.LabelsEntry + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_events_image_proto_init() } +func file_github_com_containerd_containerd_api_events_image_proto_init() { + if File_github_com_containerd_containerd_api_events_image_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_events_image_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImageCreate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - 
intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthImage - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthImage - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipImage(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImage - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipImage(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowImage - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + file_github_com_containerd_containerd_api_events_image_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImageUpdate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowImage - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowImage - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthImage - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupImage + file_github_com_containerd_containerd_api_events_image_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImageDelete); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthImage - } - if depth == 0 { - return iNdEx, nil } } - return 0, io.ErrUnexpectedEOF + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_events_image_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_events_image_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_events_image_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_events_image_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_events_image_proto = out.File + file_github_com_containerd_containerd_api_events_image_proto_rawDesc = nil + file_github_com_containerd_containerd_api_events_image_proto_goTypes = nil + file_github_com_containerd_containerd_api_events_image_proto_depIdxs = nil } - -var ( - ErrInvalidLengthImage = fmt.Errorf("proto: negative length 
found during unmarshaling") - ErrIntOverflowImage = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupImage = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/containerd/containerd/api/events/image.proto b/vendor/github.com/containerd/containerd/api/events/image.proto index fe455b54cab22..c09c7384f3af8 100644 --- a/vendor/github.com/containerd/containerd/api/events/image.proto +++ b/vendor/github.com/containerd/containerd/api/events/image.proto @@ -18,7 +18,7 @@ syntax = "proto3"; package containerd.services.images.v1; -import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; +import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.plugin.fieldpath_all) = true; diff --git a/vendor/github.com/containerd/containerd/api/events/image_fieldpath.pb.go b/vendor/github.com/containerd/containerd/api/events/image_fieldpath.pb.go new file mode 100644 index 0000000000000..2a56fcf9d831e --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/events/image_fieldpath.pb.go @@ -0,0 +1,62 @@ +// Code generated by protoc-gen-go-fieldpath. DO NOT EDIT. +// source: github.com/containerd/containerd/api/events/image.proto +package events + +import ( + strings "strings" +) + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *ImageCreate) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "name": + return string(m.Name), len(m.Name) > 0 + case "labels": + // Labels fields have been special-cased by name. If this breaks, + // add better special casing to fieldpath plugin. + if len(m.Labels) == 0 { + return "", false + } + value, ok := m.Labels[strings.Join(fieldpath[1:], ".")] + return value, ok + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *ImageUpdate) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "name": + return string(m.Name), len(m.Name) > 0 + case "labels": + // Labels fields have been special-cased by name. If this breaks, + // add better special casing to fieldpath plugin. + if len(m.Labels) == 0 { + return "", false + } + value, ok := m.Labels[strings.Join(fieldpath[1:], ".")] + return value, ok + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *ImageDelete) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "name": + return string(m.Name), len(m.Name) > 0 + } + return "", false +} diff --git a/vendor/github.com/containerd/containerd/api/events/namespace.pb.go b/vendor/github.com/containerd/containerd/api/events/namespace.pb.go index d406a987e98d1..801c1219e8a97 100644 --- a/vendor/github.com/containerd/containerd/api/events/namespace.pb.go +++ b/vendor/github.com/containerd/containerd/api/events/namespace.pb.go @@ -1,1109 +1,330 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. 
+// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/events/namespace.proto package events import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - io "io" - math "math" - math_bits "math/bits" + _ "github.com/containerd/containerd/protobuf/plugin" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - strings "strings" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type NamespaceCreate struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *NamespaceCreate) Reset() { *m = NamespaceCreate{} } -func (*NamespaceCreate) ProtoMessage() {} -func (*NamespaceCreate) Descriptor() ([]byte, []int) { - return fileDescriptor_6cd45d1d5adffe29, []int{0} -} -func (m *NamespaceCreate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *NamespaceCreate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NamespaceCreate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil + +func (x *NamespaceCreate) Reset() { + *x = NamespaceCreate{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_namespace_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *NamespaceCreate) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamespaceCreate.Merge(m, src) -} -func (m *NamespaceCreate) XXX_Size() int { - return m.Size() -} -func (m *NamespaceCreate) XXX_DiscardUnknown() { - xxx_messageInfo_NamespaceCreate.DiscardUnknown(m) -} - -var xxx_messageInfo_NamespaceCreate proto.InternalMessageInfo -type NamespaceUpdate struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *NamespaceCreate) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NamespaceUpdate) Reset() { *m = NamespaceUpdate{} } -func (*NamespaceUpdate) ProtoMessage() {} -func (*NamespaceUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_6cd45d1d5adffe29, []int{1} -} -func (m *NamespaceUpdate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NamespaceUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NamespaceUpdate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*NamespaceCreate) ProtoMessage() {} + +func (x *NamespaceCreate) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_namespace_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m 
*NamespaceUpdate) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamespaceUpdate.Merge(m, src) -} -func (m *NamespaceUpdate) XXX_Size() int { - return m.Size() -} -func (m *NamespaceUpdate) XXX_DiscardUnknown() { - xxx_messageInfo_NamespaceUpdate.DiscardUnknown(m) -} - -var xxx_messageInfo_NamespaceUpdate proto.InternalMessageInfo -type NamespaceDelete struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +// Deprecated: Use NamespaceCreate.ProtoReflect.Descriptor instead. +func (*NamespaceCreate) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_namespace_proto_rawDescGZIP(), []int{0} } -func (m *NamespaceDelete) Reset() { *m = NamespaceDelete{} } -func (*NamespaceDelete) ProtoMessage() {} -func (*NamespaceDelete) Descriptor() ([]byte, []int) { - return fileDescriptor_6cd45d1d5adffe29, []int{2} -} -func (m *NamespaceDelete) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NamespaceDelete) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NamespaceDelete.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *NamespaceCreate) GetName() string { + if x != nil { + return x.Name } + return "" } -func (m *NamespaceDelete) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamespaceDelete.Merge(m, src) -} -func (m *NamespaceDelete) XXX_Size() int { - return m.Size() -} -func (m *NamespaceDelete) XXX_DiscardUnknown() { - xxx_messageInfo_NamespaceDelete.DiscardUnknown(m) -} - -var xxx_messageInfo_NamespaceDelete proto.InternalMessageInfo -func init() { - proto.RegisterType((*NamespaceCreate)(nil), "containerd.events.NamespaceCreate") - proto.RegisterMapType((map[string]string)(nil), "containerd.events.NamespaceCreate.LabelsEntry") - proto.RegisterType((*NamespaceUpdate)(nil), "containerd.events.NamespaceUpdate") - proto.RegisterMapType((map[string]string)(nil), "containerd.events.NamespaceUpdate.LabelsEntry") - proto.RegisterType((*NamespaceDelete)(nil), "containerd.events.NamespaceDelete") +func (x *NamespaceCreate) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil } -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/events/namespace.proto", fileDescriptor_6cd45d1d5adffe29) -} +type NamespaceUpdate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -var fileDescriptor_6cd45d1d5adffe29 = []byte{ - // 296 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, - 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0xe7, 0x25, - 0xe6, 0xa6, 0x16, 0x17, 0x24, 0x26, 0xa7, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x22, - 0x94, 0xe9, 0x41, 0x94, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x65, 0xf5, 0x41, 0x2c, 0x88, - 0x42, 0x29, 0x07, 0x82, 0xb6, 0x80, 0xd5, 0x25, 0x95, 0xa6, 0xe9, 0x17, 0xe4, 0x94, 0xa6, 0x67, - 0xe6, 0xe9, 0xa7, 0x65, 0xa6, 0xe6, 0xa4, 0x14, 0x24, 0x96, 0x64, 0x40, 0x4c, 0x50, 0x5a, 0xc1, - 0xc8, 0xc5, 0xef, 0x07, 0xb3, 0xde, 0xb9, 0x28, 0x35, 0xb1, 0x24, 0x55, 0x48, 0x88, 0x8b, 0x05, - 0xe4, 
0x22, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x30, 0x5b, 0xc8, 0x8d, 0x8b, 0x2d, 0x27, - 0x31, 0x29, 0x35, 0xa7, 0x58, 0x82, 0x49, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x4f, 0x0f, 0xc3, 0x8d, - 0x7a, 0x68, 0xe6, 0xe8, 0xf9, 0x80, 0x35, 0xb8, 0xe6, 0x95, 0x14, 0x55, 0x06, 0x41, 0x75, 0x4b, - 0x59, 0x72, 0x71, 0x23, 0x09, 0x0b, 0x09, 0x70, 0x31, 0x67, 0xa7, 0x56, 0x42, 0x6d, 0x02, 0x31, - 0x85, 0x44, 0xb8, 0x58, 0xcb, 0x12, 0x73, 0x4a, 0x53, 0x25, 0x98, 0xc0, 0x62, 0x10, 0x8e, 0x15, - 0x93, 0x05, 0x23, 0xaa, 0x53, 0x43, 0x0b, 0x52, 0xa8, 0xe2, 0x54, 0x88, 0x39, 0xd4, 0x76, 0xaa, - 0x2a, 0x92, 0x4b, 0x5d, 0x52, 0x73, 0x52, 0xb1, 0xbb, 0xd4, 0x29, 0xe0, 0xc4, 0x43, 0x39, 0x86, - 0x1b, 0x0f, 0xe5, 0x18, 0x1a, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, - 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x0b, 0xbe, 0xc8, 0x31, 0x46, 0x19, 0x91, 0x90, 0x84, 0xac, 0x21, - 0x54, 0x04, 0x43, 0x04, 0x63, 0x12, 0x1b, 0x38, 0x66, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, - 0x00, 0x50, 0x87, 0x59, 0x83, 0x02, 0x00, 0x00, + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *NamespaceCreate) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false +func (x *NamespaceUpdate) Reset() { + *x = NamespaceUpdate{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_namespace_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - - switch fieldpath[0] { - case "name": - return string(m.Name), len(m.Name) > 0 - case "labels": - // Labels fields have been special-cased by name. If this breaks, - // add better special casing to fieldpath plugin. - if len(m.Labels) == 0 { - return "", false - } - value, ok := m.Labels[strings.Join(fieldpath[1:], ".")] - return value, ok - } - return "", false } -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *NamespaceUpdate) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "name": - return string(m.Name), len(m.Name) > 0 - case "labels": - // Labels fields have been special-cased by name. If this breaks, - // add better special casing to fieldpath plugin. - if len(m.Labels) == 0 { - return "", false - } - value, ok := m.Labels[strings.Join(fieldpath[1:], ".")] - return value, ok - } - return "", false +func (x *NamespaceUpdate) String() string { + return protoimpl.X.MessageStringOf(x) } -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. 
-func (m *NamespaceDelete) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } +func (*NamespaceUpdate) ProtoMessage() {} - switch fieldpath[0] { - case "name": - return string(m.Name), len(m.Name) > 0 - } - return "", false -} -func (m *NamespaceCreate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *NamespaceUpdate) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_namespace_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *NamespaceCreate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use NamespaceUpdate.ProtoReflect.Descriptor instead. +func (*NamespaceUpdate) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_namespace_proto_rawDescGZIP(), []int{1} } -func (m *NamespaceCreate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) +func (x *NamespaceUpdate) GetName() string { + if x != nil { + return x.Name } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintNamespace(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintNamespace(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintNamespace(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return "" } -func (m *NamespaceUpdate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *NamespaceUpdate) GetLabels() map[string]string { + if x != nil { + return x.Labels } - return dAtA[:n], nil + return nil } -func (m *NamespaceUpdate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} +type NamespaceDelete struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *NamespaceUpdate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintNamespace(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintNamespace(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintNamespace(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } 
-func (m *NamespaceDelete) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *NamespaceDelete) Reset() { + *x = NamespaceDelete{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_namespace_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *NamespaceDelete) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *NamespaceDelete) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *NamespaceDelete) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintNamespace(dAtA []byte, offset int, v uint64) int { - offset -= sovNamespace(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *NamespaceCreate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovNamespace(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v))) - n += mapEntrySize + 1 + sovNamespace(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} +func (*NamespaceDelete) ProtoMessage() {} -func (m *NamespaceUpdate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovNamespace(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v))) - n += mapEntrySize + 1 + sovNamespace(uint64(mapEntrySize)) +func (x *NamespaceDelete) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_namespace_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *NamespaceDelete) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovNamespace(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n + return mi.MessageOf(x) } -func sovNamespace(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozNamespace(x uint64) (n int) { - return sovNamespace(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *NamespaceCreate) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels 
+= fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&NamespaceCreate{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *NamespaceUpdate) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&NamespaceUpdate{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *NamespaceDelete) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NamespaceDelete{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringNamespace(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) +// Deprecated: Use NamespaceDelete.ProtoReflect.Descriptor instead. +func (*NamespaceDelete) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_namespace_proto_rawDescGZIP(), []int{2} +} + +func (x *NamespaceDelete) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +var File_github_com_containerd_containerd_api_events_namespace_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_events_namespace_proto_rawDesc = []byte{ + 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x1a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0xa8, 0x01, 0x0a, 0x0f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x4e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x2e, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 
0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xa8, 0x01, + 0x0a, 0x0f, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, + 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x25, 0x0a, 0x0f, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x42, + 0x38, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x3b, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x73, 0xa0, 0xf4, 0x1e, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } -func (m *NamespaceCreate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamespaceCreate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NamespaceCreate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for 
field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthNamespace - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthNamespace - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthNamespace - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthNamespace - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NamespaceUpdate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamespaceUpdate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NamespaceUpdate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthNamespace - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthNamespace - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if 
intStringLenmapvalue < 0 { - return ErrInvalidLengthNamespace - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthNamespace - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } +var ( + file_github_com_containerd_containerd_api_events_namespace_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_events_namespace_proto_rawDescData = file_github_com_containerd_containerd_api_events_namespace_proto_rawDesc +) - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NamespaceDelete) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamespaceDelete: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NamespaceDelete: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) +func file_github_com_containerd_containerd_api_events_namespace_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_events_namespace_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_events_namespace_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_events_namespace_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_events_namespace_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_events_namespace_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_github_com_containerd_containerd_api_events_namespace_proto_goTypes = []interface{}{ + (*NamespaceCreate)(nil), // 0: containerd.events.NamespaceCreate + (*NamespaceUpdate)(nil), // 1: containerd.events.NamespaceUpdate + (*NamespaceDelete)(nil), // 2: containerd.events.NamespaceDelete + nil, // 3: containerd.events.NamespaceCreate.LabelsEntry + nil, // 4: containerd.events.NamespaceUpdate.LabelsEntry +} +var file_github_com_containerd_containerd_api_events_namespace_proto_depIdxs = []int32{ + 3, // 0: containerd.events.NamespaceCreate.labels:type_name -> containerd.events.NamespaceCreate.LabelsEntry + 4, // 1: containerd.events.NamespaceUpdate.labels:type_name -> containerd.events.NamespaceUpdate.LabelsEntry + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the 
sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_events_namespace_proto_init() } +func file_github_com_containerd_containerd_api_events_namespace_proto_init() { + if File_github_com_containerd_containerd_api_events_namespace_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_events_namespace_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamespaceCreate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipNamespace(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowNamespace - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + file_github_com_containerd_containerd_api_events_namespace_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamespaceUpdate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowNamespace - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowNamespace - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthNamespace - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupNamespace + file_github_com_containerd_containerd_api_events_namespace_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamespaceDelete); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: 
illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthNamespace - } - if depth == 0 { - return iNdEx, nil } } - return 0, io.ErrUnexpectedEOF + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_events_namespace_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_events_namespace_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_events_namespace_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_events_namespace_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_events_namespace_proto = out.File + file_github_com_containerd_containerd_api_events_namespace_proto_rawDesc = nil + file_github_com_containerd_containerd_api_events_namespace_proto_goTypes = nil + file_github_com_containerd_containerd_api_events_namespace_proto_depIdxs = nil } - -var ( - ErrInvalidLengthNamespace = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowNamespace = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupNamespace = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/containerd/containerd/api/events/namespace.proto b/vendor/github.com/containerd/containerd/api/events/namespace.proto index 53a8ee6306afd..9bae531d7fe58 100644 --- a/vendor/github.com/containerd/containerd/api/events/namespace.proto +++ b/vendor/github.com/containerd/containerd/api/events/namespace.proto @@ -18,8 +18,7 @@ syntax = "proto3"; package containerd.events; -import weak "gogoproto/gogo.proto"; -import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; +import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.plugin.fieldpath_all) = true; diff --git a/vendor/github.com/containerd/containerd/api/events/namespace_fieldpath.pb.go b/vendor/github.com/containerd/containerd/api/events/namespace_fieldpath.pb.go new file mode 100644 index 0000000000000..93d20a6767065 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/events/namespace_fieldpath.pb.go @@ -0,0 +1,62 @@ +// Code generated by protoc-gen-go-fieldpath. DO NOT EDIT. +// source: github.com/containerd/containerd/api/events/namespace.proto +package events + +import ( + strings "strings" +) + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *NamespaceCreate) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "name": + return string(m.Name), len(m.Name) > 0 + case "labels": + // Labels fields have been special-cased by name. If this breaks, + // add better special casing to fieldpath plugin. + if len(m.Labels) == 0 { + return "", false + } + value, ok := m.Labels[strings.Join(fieldpath[1:], ".")] + return value, ok + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. 
+func (m *NamespaceUpdate) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "name": + return string(m.Name), len(m.Name) > 0 + case "labels": + // Labels fields have been special-cased by name. If this breaks, + // add better special casing to fieldpath plugin. + if len(m.Labels) == 0 { + return "", false + } + value, ok := m.Labels[strings.Join(fieldpath[1:], ".")] + return value, ok + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *NamespaceDelete) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "name": + return string(m.Name), len(m.Name) > 0 + } + return "", false +} diff --git a/vendor/github.com/containerd/containerd/api/events/snapshot.pb.go b/vendor/github.com/containerd/containerd/api/events/snapshot.pb.go index bec25c3a7c991..e074f9df21c3c 100644 --- a/vendor/github.com/containerd/containerd/api/events/snapshot.pb.go +++ b/vendor/github.com/containerd/containerd/api/events/snapshot.pb.go @@ -1,848 +1,342 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/events/snapshot.proto package events import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" + _ "github.com/containerd/containerd/protobuf/plugin" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - strings "strings" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type SnapshotPrepare struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Parent string `protobuf:"bytes,2,opt,name=parent,proto3" json:"parent,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *SnapshotPrepare) Reset() { *m = SnapshotPrepare{} } -func (*SnapshotPrepare) ProtoMessage() {} -func (*SnapshotPrepare) Descriptor() ([]byte, []int) { - return fileDescriptor_bd6c184d3d9aa5f2, []int{0} + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Parent string `protobuf:"bytes,2,opt,name=parent,proto3" json:"parent,omitempty"` + Snapshotter string `protobuf:"bytes,5,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` } -func (m *SnapshotPrepare) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SnapshotPrepare) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SnapshotPrepare.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil + +func (x *SnapshotPrepare) Reset() { + *x = SnapshotPrepare{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_snapshot_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *SnapshotPrepare) XXX_Merge(src proto.Message) { - xxx_messageInfo_SnapshotPrepare.Merge(m, src) -} -func (m *SnapshotPrepare) XXX_Size() int { - return m.Size() -} -func (m *SnapshotPrepare) XXX_DiscardUnknown() { - xxx_messageInfo_SnapshotPrepare.DiscardUnknown(m) -} - -var xxx_messageInfo_SnapshotPrepare proto.InternalMessageInfo -type SnapshotCommit struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *SnapshotPrepare) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SnapshotCommit) Reset() { *m = SnapshotCommit{} } -func (*SnapshotCommit) ProtoMessage() {} -func (*SnapshotCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_bd6c184d3d9aa5f2, []int{1} -} -func (m *SnapshotCommit) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SnapshotCommit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SnapshotCommit.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*SnapshotPrepare) ProtoMessage() {} + +func (x *SnapshotPrepare) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_snapshot_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *SnapshotCommit) XXX_Merge(src proto.Message) { - xxx_messageInfo_SnapshotCommit.Merge(m, src) -} -func (m *SnapshotCommit) XXX_Size() int { - return m.Size() -} -func (m *SnapshotCommit) XXX_DiscardUnknown() { - 
xxx_messageInfo_SnapshotCommit.DiscardUnknown(m) -} - -var xxx_messageInfo_SnapshotCommit proto.InternalMessageInfo -type SnapshotRemove struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +// Deprecated: Use SnapshotPrepare.ProtoReflect.Descriptor instead. +func (*SnapshotPrepare) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_snapshot_proto_rawDescGZIP(), []int{0} } -func (m *SnapshotRemove) Reset() { *m = SnapshotRemove{} } -func (*SnapshotRemove) ProtoMessage() {} -func (*SnapshotRemove) Descriptor() ([]byte, []int) { - return fileDescriptor_bd6c184d3d9aa5f2, []int{2} -} -func (m *SnapshotRemove) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SnapshotRemove) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SnapshotRemove.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *SnapshotPrepare) GetKey() string { + if x != nil { + return x.Key } -} -func (m *SnapshotRemove) XXX_Merge(src proto.Message) { - xxx_messageInfo_SnapshotRemove.Merge(m, src) -} -func (m *SnapshotRemove) XXX_Size() int { - return m.Size() -} -func (m *SnapshotRemove) XXX_DiscardUnknown() { - xxx_messageInfo_SnapshotRemove.DiscardUnknown(m) -} - -var xxx_messageInfo_SnapshotRemove proto.InternalMessageInfo - -func init() { - proto.RegisterType((*SnapshotPrepare)(nil), "containerd.events.SnapshotPrepare") - proto.RegisterType((*SnapshotCommit)(nil), "containerd.events.SnapshotCommit") - proto.RegisterType((*SnapshotRemove)(nil), "containerd.events.SnapshotRemove") + return "" } -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/events/snapshot.proto", fileDescriptor_bd6c184d3d9aa5f2) +func (x *SnapshotPrepare) GetParent() string { + if x != nil { + return x.Parent + } + return "" } -var fileDescriptor_bd6c184d3d9aa5f2 = []byte{ - // 235 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4a, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, - 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x17, 0xe7, - 0x25, 0x16, 0x14, 0x67, 0xe4, 0x97, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x22, 0x54, - 0xe9, 0x41, 0x54, 0x48, 0x39, 0x10, 0x34, 0x0e, 0xac, 0x35, 0xa9, 0x34, 0x4d, 0xbf, 0x20, 0xa7, - 0x34, 0x3d, 0x33, 0x4f, 0x3f, 0x2d, 0x33, 0x35, 0x27, 0xa5, 0x20, 0xb1, 0x24, 0x03, 0x62, 0xa8, - 0x92, 0x35, 0x17, 0x7f, 0x30, 0xd4, 0x9a, 0x80, 0xa2, 0xd4, 0x82, 0xc4, 0xa2, 0x54, 0x21, 0x01, - 0x2e, 0xe6, 0xec, 0xd4, 0x4a, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x10, 0x53, 0x48, 0x8c, - 0x8b, 0x0d, 0x24, 0x93, 0x57, 0x22, 0xc1, 0x04, 0x16, 0x84, 0xf2, 0x94, 0xcc, 0xb8, 0xf8, 0x60, - 0x9a, 0x9d, 0xf3, 0x73, 0x73, 0x33, 0x4b, 0xb0, 0xe8, 0x15, 0xe2, 0x62, 0xc9, 0x4b, 0xcc, 0x4d, - 0x85, 0xea, 0x04, 0xb3, 0x95, 0x94, 0x10, 0xfa, 0x82, 0x52, 0x73, 0xf3, 0xcb, 0xb0, 0xd8, 0xe9, - 0x14, 0x70, 0xe2, 0xa1, 0x1c, 0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x0d, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, - 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x05, 0x5f, 0xe4, 0x18, 0xa3, - 0x8c, 0x48, 0x08, 0x47, 0x6b, 0x08, 0x15, 0xc1, 0x90, 0xc4, 0x06, 0xf6, 0xb3, 0x31, 0x20, 0x00, - 
0x00, 0xff, 0xff, 0x69, 0x66, 0xa9, 0x2a, 0x86, 0x01, 0x00, 0x00, +func (x *SnapshotPrepare) GetSnapshotter() string { + if x != nil { + return x.Snapshotter + } + return "" } -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *SnapshotPrepare) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } +type SnapshotCommit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields - switch fieldpath[0] { - case "key": - return string(m.Key), len(m.Key) > 0 - case "parent": - return string(m.Parent), len(m.Parent) > 0 - } - return "", false + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Snapshotter string `protobuf:"bytes,5,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` } -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *SnapshotCommit) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false +func (x *SnapshotCommit) Reset() { + *x = SnapshotCommit{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_snapshot_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } +} - switch fieldpath[0] { - case "key": - return string(m.Key), len(m.Key) > 0 - case "name": - return string(m.Name), len(m.Name) > 0 - } - return "", false +func (x *SnapshotCommit) String() string { + return protoimpl.X.MessageStringOf(x) } -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *SnapshotRemove) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } +func (*SnapshotCommit) ProtoMessage() {} - switch fieldpath[0] { - case "key": - return string(m.Key), len(m.Key) > 0 - } - return "", false -} -func (m *SnapshotPrepare) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *SnapshotCommit) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_snapshot_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *SnapshotPrepare) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use SnapshotCommit.ProtoReflect.Descriptor instead. 
+func (*SnapshotCommit) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_snapshot_proto_rawDescGZIP(), []int{1} } -func (m *SnapshotPrepare) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Parent) > 0 { - i -= len(m.Parent) - copy(dAtA[i:], m.Parent) - i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Parent))) - i-- - dAtA[i] = 0x12 +func (x *SnapshotCommit) GetKey() string { + if x != nil { + return x.Key } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return "" } -func (m *SnapshotCommit) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *SnapshotCommit) GetName() string { + if x != nil { + return x.Name } - return dAtA[:n], nil -} - -func (m *SnapshotCommit) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return "" } -func (m *SnapshotCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa +func (x *SnapshotCommit) GetSnapshotter() string { + if x != nil { + return x.Snapshotter } - return len(dAtA) - i, nil + return "" } -func (m *SnapshotRemove) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} +type SnapshotRemove struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *SnapshotRemove) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Snapshotter string `protobuf:"bytes,5,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` } -func (m *SnapshotRemove) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) +func (x *SnapshotRemove) Reset() { + *x = SnapshotRemove{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_snapshot_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil } -func encodeVarintSnapshot(dAtA []byte, offset int, v uint64) int { - offset -= sovSnapshot(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *SnapshotPrepare) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = 
len(m.Key) - if l > 0 { - n += 1 + l + sovSnapshot(uint64(l)) - } - l = len(m.Parent) - if l > 0 { - n += 1 + l + sovSnapshot(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n +func (x *SnapshotRemove) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SnapshotCommit) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovSnapshot(uint64(l)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovSnapshot(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} +func (*SnapshotRemove) ProtoMessage() {} -func (m *SnapshotRemove) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovSnapshot(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) +func (x *SnapshotRemove) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_snapshot_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return n + return mi.MessageOf(x) } -func sovSnapshot(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozSnapshot(x uint64) (n int) { - return sovSnapshot(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *SnapshotPrepare) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SnapshotPrepare{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Parent:` + fmt.Sprintf("%v", this.Parent) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *SnapshotCommit) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SnapshotCommit{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *SnapshotRemove) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SnapshotRemove{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringSnapshot(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) +// Deprecated: Use SnapshotRemove.ProtoReflect.Descriptor instead. 
+func (*SnapshotRemove) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_snapshot_proto_rawDescGZIP(), []int{2} +} + +func (x *SnapshotRemove) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *SnapshotRemove) GetSnapshotter() string { + if x != nil { + return x.Snapshotter + } + return "" +} + +var File_github_com_containerd_containerd_api_events_snapshot_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_events_snapshot_proto_rawDesc = []byte{ + 0x0a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x73, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x1a, + 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x5d, 0x0a, 0x0f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x50, 0x72, 0x65, + 0x70, 0x61, 0x72, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x20, + 0x0a, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, + 0x22, 0x58, 0x0a, 0x0e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x43, 0x6f, 0x6d, 0x6d, + 0x69, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x22, 0x44, 0x0a, 0x0e, 0x53, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x20, + 0x0a, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, + 0x42, 0x38, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x3b, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0xa0, 0xf4, 0x1e, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } -func (m *SnapshotPrepare) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - 
for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SnapshotPrepare: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SnapshotPrepare: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Parent = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshot(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshot - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SnapshotCommit) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SnapshotCommit: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SnapshotCommit: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshot(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshot - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } +var ( + file_github_com_containerd_containerd_api_events_snapshot_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_events_snapshot_proto_rawDescData = file_github_com_containerd_containerd_api_events_snapshot_proto_rawDesc +) - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SnapshotRemove) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF +func file_github_com_containerd_containerd_api_events_snapshot_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_events_snapshot_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_events_snapshot_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_events_snapshot_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_events_snapshot_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_events_snapshot_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_github_com_containerd_containerd_api_events_snapshot_proto_goTypes = []interface{}{ + (*SnapshotPrepare)(nil), // 0: containerd.events.SnapshotPrepare + (*SnapshotCommit)(nil), // 1: containerd.events.SnapshotCommit + (*SnapshotRemove)(nil), // 2: containerd.events.SnapshotRemove +} +var file_github_com_containerd_containerd_api_events_snapshot_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_events_snapshot_proto_init() } +func file_github_com_containerd_containerd_api_events_snapshot_proto_init() { + if File_github_com_containerd_containerd_api_events_snapshot_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_events_snapshot_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SnapshotPrepare); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SnapshotRemove: wiretype end group for non-group") } - if fieldNum <= 0 { - return fmt.Errorf("proto: SnapshotRemove: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshot - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshot - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshot - } - if postIndex > l { - return io.ErrUnexpectedEOF + file_github_com_containerd_containerd_api_events_snapshot_proto_msgTypes[1].Exporter = func(v interface{}, i 
int) interface{} { + switch v := v.(*SnapshotCommit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshot(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshot - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipSnapshot(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnapshot - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + file_github_com_containerd_containerd_api_events_snapshot_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SnapshotRemove); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnapshot - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnapshot - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthSnapshot - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupSnapshot - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthSnapshot - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_events_snapshot_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_events_snapshot_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_events_snapshot_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_events_snapshot_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_events_snapshot_proto = out.File + file_github_com_containerd_containerd_api_events_snapshot_proto_rawDesc = nil + file_github_com_containerd_containerd_api_events_snapshot_proto_goTypes = nil + file_github_com_containerd_containerd_api_events_snapshot_proto_depIdxs = nil } - -var ( - ErrInvalidLengthSnapshot = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowSnapshot = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupSnapshot = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/containerd/containerd/api/events/snapshot.proto 
b/vendor/github.com/containerd/containerd/api/events/snapshot.proto index eb1f06725c702..effd869136791 100644 --- a/vendor/github.com/containerd/containerd/api/events/snapshot.proto +++ b/vendor/github.com/containerd/containerd/api/events/snapshot.proto @@ -18,7 +18,7 @@ syntax = "proto3"; package containerd.events; -import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; +import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; option go_package = "github.com/containerd/containerd/api/events;events"; option (containerd.plugin.fieldpath_all) = true; @@ -26,13 +26,16 @@ option (containerd.plugin.fieldpath_all) = true; message SnapshotPrepare { string key = 1; string parent = 2; + string snapshotter = 5; } message SnapshotCommit { string key = 1; string name = 2; + string snapshotter = 5; } message SnapshotRemove { string key = 1; + string snapshotter = 5; } diff --git a/vendor/github.com/containerd/containerd/api/events/snapshot_fieldpath.pb.go b/vendor/github.com/containerd/containerd/api/events/snapshot_fieldpath.pb.go new file mode 100644 index 0000000000000..c89d1ab4a3364 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/events/snapshot_fieldpath.pb.go @@ -0,0 +1,52 @@ +// Code generated by protoc-gen-go-fieldpath. DO NOT EDIT. +// source: github.com/containerd/containerd/api/events/snapshot.proto +package events + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *SnapshotPrepare) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "key": + return string(m.Key), len(m.Key) > 0 + case "parent": + return string(m.Parent), len(m.Parent) > 0 + case "snapshotter": + return string(m.Snapshotter), len(m.Snapshotter) > 0 + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *SnapshotCommit) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "key": + return string(m.Key), len(m.Key) > 0 + case "name": + return string(m.Name), len(m.Name) > 0 + case "snapshotter": + return string(m.Snapshotter), len(m.Snapshotter) > 0 + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *SnapshotRemove) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "key": + return string(m.Key), len(m.Key) > 0 + case "snapshotter": + return string(m.Snapshotter), len(m.Snapshotter) > 0 + } + return "", false +} diff --git a/vendor/github.com/containerd/containerd/api/events/task.pb.go b/vendor/github.com/containerd/containerd/api/events/task.pb.go index f8f3a3f3d3070..33fd521b6c877 100644 --- a/vendor/github.com/containerd/containerd/api/events/task.pb.go +++ b/vendor/github.com/containerd/containerd/api/events/task.pb.go @@ -1,3261 +1,1020 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. 
+//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/events/task.proto package events import ( - fmt "fmt" types "github.com/containerd/containerd/api/types" - proto "github.com/gogo/protobuf/proto" - _ "github.com/gogo/protobuf/types" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - io "io" - math "math" - math_bits "math/bits" + _ "github.com/containerd/containerd/protobuf/plugin" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - strings "strings" - time "time" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type TaskCreate struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - Bundle string `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"` - Rootfs []*types.Mount `protobuf:"bytes,3,rep,name=rootfs,proto3" json:"rootfs,omitempty"` - IO *TaskIO `protobuf:"bytes,4,opt,name=io,proto3" json:"io,omitempty"` - Checkpoint string `protobuf:"bytes,5,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"` - Pid uint32 `protobuf:"varint,6,opt,name=pid,proto3" json:"pid,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TaskCreate) Reset() { *m = TaskCreate{} } -func (*TaskCreate) ProtoMessage() {} -func (*TaskCreate) Descriptor() ([]byte, []int) { - return fileDescriptor_8db0813f7adfb63c, []int{0} -} -func (m *TaskCreate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TaskCreate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TaskCreate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TaskCreate) XXX_Merge(src proto.Message) { - xxx_messageInfo_TaskCreate.Merge(m, src) -} -func (m *TaskCreate) XXX_Size() int { - return m.Size() -} -func (m *TaskCreate) XXX_DiscardUnknown() { - xxx_messageInfo_TaskCreate.DiscardUnknown(m) -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -var xxx_messageInfo_TaskCreate proto.InternalMessageInfo - -type TaskStart struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + Bundle string `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"` + Rootfs []*types.Mount `protobuf:"bytes,3,rep,name=rootfs,proto3" json:"rootfs,omitempty"` + IO *TaskIO `protobuf:"bytes,4,opt,name=io,proto3" json:"io,omitempty"` + Checkpoint string `protobuf:"bytes,5,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"` + Pid uint32 `protobuf:"varint,6,opt,name=pid,proto3" json:"pid,omitempty"` } -func (m *TaskStart) Reset() { *m = TaskStart{} } -func (*TaskStart) ProtoMessage() {} -func (*TaskStart) Descriptor() ([]byte, []int) { - return fileDescriptor_8db0813f7adfb63c, []int{1} -} -func (m *TaskStart) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TaskStart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TaskStart.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *TaskCreate) Reset() { + *x = TaskCreate{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *TaskStart) XXX_Merge(src proto.Message) { - xxx_messageInfo_TaskStart.Merge(m, src) -} -func (m *TaskStart) XXX_Size() 
int { - return m.Size() -} -func (m *TaskStart) XXX_DiscardUnknown() { - xxx_messageInfo_TaskStart.DiscardUnknown(m) -} -var xxx_messageInfo_TaskStart proto.InternalMessageInfo - -type TaskDelete struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` - ExitStatus uint32 `protobuf:"varint,3,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` - ExitedAt time.Time `protobuf:"bytes,4,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"` - // id is the specific exec. By default if omitted will be `""` thus matches - // the init exec of the task matching `container_id`. - ID string `protobuf:"bytes,5,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *TaskCreate) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *TaskDelete) Reset() { *m = TaskDelete{} } -func (*TaskDelete) ProtoMessage() {} -func (*TaskDelete) Descriptor() ([]byte, []int) { - return fileDescriptor_8db0813f7adfb63c, []int{2} -} -func (m *TaskDelete) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TaskDelete) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TaskDelete.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*TaskCreate) ProtoMessage() {} + +func (x *TaskCreate) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *TaskDelete) XXX_Merge(src proto.Message) { - xxx_messageInfo_TaskDelete.Merge(m, src) -} -func (m *TaskDelete) XXX_Size() int { - return m.Size() -} -func (m *TaskDelete) XXX_DiscardUnknown() { - xxx_messageInfo_TaskDelete.DiscardUnknown(m) -} - -var xxx_messageInfo_TaskDelete proto.InternalMessageInfo -type TaskIO struct { - Stdin string `protobuf:"bytes,1,opt,name=stdin,proto3" json:"stdin,omitempty"` - Stdout string `protobuf:"bytes,2,opt,name=stdout,proto3" json:"stdout,omitempty"` - Stderr string `protobuf:"bytes,3,opt,name=stderr,proto3" json:"stderr,omitempty"` - Terminal bool `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +// Deprecated: Use TaskCreate.ProtoReflect.Descriptor instead. 
+func (*TaskCreate) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_task_proto_rawDescGZIP(), []int{0} } -func (m *TaskIO) Reset() { *m = TaskIO{} } -func (*TaskIO) ProtoMessage() {} -func (*TaskIO) Descriptor() ([]byte, []int) { - return fileDescriptor_8db0813f7adfb63c, []int{3} -} -func (m *TaskIO) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TaskIO) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TaskIO.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *TaskCreate) GetContainerID() string { + if x != nil { + return x.ContainerID } -} -func (m *TaskIO) XXX_Merge(src proto.Message) { - xxx_messageInfo_TaskIO.Merge(m, src) -} -func (m *TaskIO) XXX_Size() int { - return m.Size() -} -func (m *TaskIO) XXX_DiscardUnknown() { - xxx_messageInfo_TaskIO.DiscardUnknown(m) + return "" } -var xxx_messageInfo_TaskIO proto.InternalMessageInfo - -type TaskExit struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` - Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` - ExitStatus uint32 `protobuf:"varint,4,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` - ExitedAt time.Time `protobuf:"bytes,5,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *TaskCreate) GetBundle() string { + if x != nil { + return x.Bundle + } + return "" } -func (m *TaskExit) Reset() { *m = TaskExit{} } -func (*TaskExit) ProtoMessage() {} -func (*TaskExit) Descriptor() ([]byte, []int) { - return fileDescriptor_8db0813f7adfb63c, []int{4} -} -func (m *TaskExit) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TaskExit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TaskExit.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *TaskCreate) GetRootfs() []*types.Mount { + if x != nil { + return x.Rootfs } -} -func (m *TaskExit) XXX_Merge(src proto.Message) { - xxx_messageInfo_TaskExit.Merge(m, src) -} -func (m *TaskExit) XXX_Size() int { - return m.Size() -} -func (m *TaskExit) XXX_DiscardUnknown() { - xxx_messageInfo_TaskExit.DiscardUnknown(m) + return nil } -var xxx_messageInfo_TaskExit proto.InternalMessageInfo - -type TaskOOM struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *TaskCreate) GetIO() *TaskIO { + if x != nil { + return x.IO + } + return nil } -func (m *TaskOOM) Reset() { *m = TaskOOM{} } -func (*TaskOOM) ProtoMessage() {} -func (*TaskOOM) Descriptor() ([]byte, []int) { - return fileDescriptor_8db0813f7adfb63c, []int{5} -} -func (m *TaskOOM) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TaskOOM) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TaskOOM.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] 
- n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *TaskCreate) GetCheckpoint() string { + if x != nil { + return x.Checkpoint } + return "" } -func (m *TaskOOM) XXX_Merge(src proto.Message) { - xxx_messageInfo_TaskOOM.Merge(m, src) -} -func (m *TaskOOM) XXX_Size() int { - return m.Size() -} -func (m *TaskOOM) XXX_DiscardUnknown() { - xxx_messageInfo_TaskOOM.DiscardUnknown(m) + +func (x *TaskCreate) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 } -var xxx_messageInfo_TaskOOM proto.InternalMessageInfo +type TaskStart struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -type TaskExecAdded struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` } -func (m *TaskExecAdded) Reset() { *m = TaskExecAdded{} } -func (*TaskExecAdded) ProtoMessage() {} -func (*TaskExecAdded) Descriptor() ([]byte, []int) { - return fileDescriptor_8db0813f7adfb63c, []int{6} -} -func (m *TaskExecAdded) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TaskExecAdded) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TaskExecAdded.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *TaskStart) Reset() { + *x = TaskStart{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *TaskExecAdded) XXX_Merge(src proto.Message) { - xxx_messageInfo_TaskExecAdded.Merge(m, src) -} -func (m *TaskExecAdded) XXX_Size() int { - return m.Size() -} -func (m *TaskExecAdded) XXX_DiscardUnknown() { - xxx_messageInfo_TaskExecAdded.DiscardUnknown(m) -} - -var xxx_messageInfo_TaskExecAdded proto.InternalMessageInfo -type TaskExecStarted struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *TaskStart) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *TaskExecStarted) Reset() { *m = TaskExecStarted{} } -func (*TaskExecStarted) ProtoMessage() {} -func (*TaskExecStarted) Descriptor() ([]byte, []int) { - return fileDescriptor_8db0813f7adfb63c, []int{7} -} -func (m *TaskExecStarted) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TaskExecStarted) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TaskExecStarted.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*TaskStart) 
ProtoMessage() {} + +func (x *TaskStart) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *TaskExecStarted) XXX_Merge(src proto.Message) { - xxx_messageInfo_TaskExecStarted.Merge(m, src) -} -func (m *TaskExecStarted) XXX_Size() int { - return m.Size() -} -func (m *TaskExecStarted) XXX_DiscardUnknown() { - xxx_messageInfo_TaskExecStarted.DiscardUnknown(m) + return mi.MessageOf(x) } -var xxx_messageInfo_TaskExecStarted proto.InternalMessageInfo - -type TaskPaused struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +// Deprecated: Use TaskStart.ProtoReflect.Descriptor instead. +func (*TaskStart) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_task_proto_rawDescGZIP(), []int{1} } -func (m *TaskPaused) Reset() { *m = TaskPaused{} } -func (*TaskPaused) ProtoMessage() {} -func (*TaskPaused) Descriptor() ([]byte, []int) { - return fileDescriptor_8db0813f7adfb63c, []int{8} -} -func (m *TaskPaused) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TaskPaused) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TaskPaused.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *TaskStart) GetContainerID() string { + if x != nil { + return x.ContainerID } -} -func (m *TaskPaused) XXX_Merge(src proto.Message) { - xxx_messageInfo_TaskPaused.Merge(m, src) -} -func (m *TaskPaused) XXX_Size() int { - return m.Size() -} -func (m *TaskPaused) XXX_DiscardUnknown() { - xxx_messageInfo_TaskPaused.DiscardUnknown(m) + return "" } -var xxx_messageInfo_TaskPaused proto.InternalMessageInfo +func (x *TaskStart) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} -type TaskResumed struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +type TaskDelete struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` + ExitStatus uint32 `protobuf:"varint,3,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=exited_at,json=exitedAt,proto3" json:"exited_at,omitempty"` + // id is the specific exec. By default if omitted will be `""` thus matches + // the init exec of the task matching `container_id`. 
+ ID string `protobuf:"bytes,5,opt,name=id,proto3" json:"id,omitempty"` } -func (m *TaskResumed) Reset() { *m = TaskResumed{} } -func (*TaskResumed) ProtoMessage() {} -func (*TaskResumed) Descriptor() ([]byte, []int) { - return fileDescriptor_8db0813f7adfb63c, []int{9} -} -func (m *TaskResumed) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TaskResumed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TaskResumed.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *TaskDelete) Reset() { + *x = TaskDelete{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *TaskResumed) XXX_Merge(src proto.Message) { - xxx_messageInfo_TaskResumed.Merge(m, src) -} -func (m *TaskResumed) XXX_Size() int { - return m.Size() -} -func (m *TaskResumed) XXX_DiscardUnknown() { - xxx_messageInfo_TaskResumed.DiscardUnknown(m) -} - -var xxx_messageInfo_TaskResumed proto.InternalMessageInfo -type TaskCheckpointed struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - Checkpoint string `protobuf:"bytes,2,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *TaskDelete) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *TaskCheckpointed) Reset() { *m = TaskCheckpointed{} } -func (*TaskCheckpointed) ProtoMessage() {} -func (*TaskCheckpointed) Descriptor() ([]byte, []int) { - return fileDescriptor_8db0813f7adfb63c, []int{10} -} -func (m *TaskCheckpointed) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TaskCheckpointed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TaskCheckpointed.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*TaskDelete) ProtoMessage() {} + +func (x *TaskDelete) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *TaskCheckpointed) XXX_Merge(src proto.Message) { - xxx_messageInfo_TaskCheckpointed.Merge(m, src) -} -func (m *TaskCheckpointed) XXX_Size() int { - return m.Size() -} -func (m *TaskCheckpointed) XXX_DiscardUnknown() { - xxx_messageInfo_TaskCheckpointed.DiscardUnknown(m) -} - -var xxx_messageInfo_TaskCheckpointed proto.InternalMessageInfo - -func init() { - proto.RegisterType((*TaskCreate)(nil), "containerd.events.TaskCreate") - proto.RegisterType((*TaskStart)(nil), "containerd.events.TaskStart") - proto.RegisterType((*TaskDelete)(nil), "containerd.events.TaskDelete") - proto.RegisterType((*TaskIO)(nil), "containerd.events.TaskIO") - proto.RegisterType((*TaskExit)(nil), "containerd.events.TaskExit") - proto.RegisterType((*TaskOOM)(nil), "containerd.events.TaskOOM") - proto.RegisterType((*TaskExecAdded)(nil), "containerd.events.TaskExecAdded") - 
proto.RegisterType((*TaskExecStarted)(nil), "containerd.events.TaskExecStarted") - proto.RegisterType((*TaskPaused)(nil), "containerd.events.TaskPaused") - proto.RegisterType((*TaskResumed)(nil), "containerd.events.TaskResumed") - proto.RegisterType((*TaskCheckpointed)(nil), "containerd.events.TaskCheckpointed") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/events/task.proto", fileDescriptor_8db0813f7adfb63c) -} - -var fileDescriptor_8db0813f7adfb63c = []byte{ - // 644 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x95, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xc7, 0x63, 0xa7, 0x75, 0xd3, 0x09, 0x55, 0x8b, 0x55, 0x95, 0x90, 0x83, 0x1d, 0x99, 0x4b, - 0x4e, 0xb6, 0x08, 0x12, 0x17, 0x84, 0xd4, 0xa4, 0xe1, 0x90, 0x43, 0x95, 0xe2, 0xf6, 0x50, 0x71, - 0x89, 0x36, 0xd9, 0x4d, 0xb2, 0x34, 0xf1, 0x5a, 0xf6, 0x18, 0x15, 0x89, 0x03, 0x8f, 0xc0, 0x23, - 0xf0, 0x38, 0x3d, 0x20, 0xc4, 0x91, 0x53, 0xa0, 0x7e, 0x00, 0x4e, 0x3c, 0x00, 0x5a, 0xaf, 0x93, - 0xb6, 0x54, 0x7c, 0x59, 0xe2, 0x94, 0x9d, 0xd9, 0xd9, 0xff, 0xec, 0xfc, 0x76, 0x3c, 0x81, 0xc7, - 0x13, 0x8e, 0xd3, 0x64, 0xe8, 0x8e, 0xc4, 0xdc, 0x1b, 0x89, 0x00, 0x09, 0x0f, 0x58, 0x44, 0xaf, - 0x2f, 0x49, 0xc8, 0x3d, 0xf6, 0x8a, 0x05, 0x18, 0x7b, 0x48, 0xe2, 0x33, 0x37, 0x8c, 0x04, 0x0a, - 0xf3, 0xee, 0x55, 0x84, 0xab, 0x76, 0xeb, 0xbb, 0x13, 0x31, 0x11, 0xd9, 0xae, 0x27, 0x57, 0x2a, - 0xb0, 0x6e, 0x4f, 0x84, 0x98, 0xcc, 0x98, 0x97, 0x59, 0xc3, 0x64, 0xec, 0x21, 0x9f, 0xb3, 0x18, - 0xc9, 0x3c, 0xcc, 0x03, 0xfe, 0xee, 0x06, 0xf8, 0x3a, 0x64, 0xb1, 0x37, 0x17, 0x49, 0x80, 0xf9, - 0xb9, 0xfd, 0x3f, 0x9e, 0x5b, 0xa5, 0x0c, 0x67, 0xc9, 0x84, 0x07, 0xde, 0x98, 0xb3, 0x19, 0x0d, - 0x09, 0x4e, 0x95, 0x82, 0xf3, 0x4d, 0x03, 0x38, 0x21, 0xf1, 0xd9, 0x41, 0xc4, 0x08, 0x32, 0xb3, - 0x05, 0x77, 0x56, 0x87, 0x07, 0x9c, 0xd6, 0xb4, 0x86, 0xd6, 0xdc, 0xec, 0x6c, 0xa7, 0x0b, 0xbb, - 0x7a, 0xb0, 0xf4, 0xf7, 0xba, 0x7e, 0x75, 0x15, 0xd4, 0xa3, 0xe6, 0x1e, 0x18, 0xc3, 0x24, 0xa0, - 0x33, 0x56, 0xd3, 0x65, 0xb4, 0x9f, 0x5b, 0xa6, 0x07, 0x46, 0x24, 0x04, 0x8e, 0xe3, 0x5a, 0xb9, - 0x51, 0x6e, 0x56, 0x5b, 0xf7, 0xdc, 0x6b, 0xbc, 0xb2, 0x5a, 0xdc, 0x43, 0x59, 0x8b, 0x9f, 0x87, - 0x99, 0x0f, 0x41, 0xe7, 0xa2, 0xb6, 0xd6, 0xd0, 0x9a, 0xd5, 0xd6, 0x7d, 0xf7, 0x16, 0x5c, 0x57, - 0xde, 0xb3, 0xd7, 0xef, 0x18, 0xe9, 0xc2, 0xd6, 0x7b, 0x7d, 0x5f, 0xe7, 0xc2, 0xb4, 0x00, 0x46, - 0x53, 0x36, 0x3a, 0x0b, 0x05, 0x0f, 0xb0, 0xb6, 0x9e, 0xe5, 0xbf, 0xe6, 0x31, 0x77, 0xa0, 0x1c, - 0x72, 0x5a, 0x33, 0x1a, 0x5a, 0x73, 0xcb, 0x97, 0x4b, 0xe7, 0x39, 0x6c, 0x4a, 0x9d, 0x63, 0x24, - 0x11, 0x16, 0x2a, 0x37, 0x97, 0xd4, 0xaf, 0x24, 0x3f, 0xe6, 0x0c, 0xbb, 0x6c, 0xc6, 0x0a, 0x32, - 0xbc, 0x25, 0x6a, 0xda, 0x50, 0x65, 0xe7, 0x1c, 0x07, 0x31, 0x12, 0x4c, 0x24, 0x42, 0xb9, 0x03, - 0xd2, 0x75, 0x9c, 0x79, 0xcc, 0x36, 0x6c, 0x4a, 0x8b, 0xd1, 0x01, 0xc1, 0x1c, 0x5a, 0xdd, 0x55, - 0x8d, 0xe6, 0x2e, 0x5f, 0xdd, 0x3d, 0x59, 0x36, 0x5a, 0xa7, 0x72, 0xb1, 0xb0, 0x4b, 0xef, 0xbe, - 0xd8, 0x9a, 0x5f, 0x51, 0xc7, 0xda, 0x68, 0xee, 0x81, 0xce, 0xa9, 0xa2, 0x96, 0x53, 0xed, 0xfa, - 0x3a, 0xa7, 0xce, 0x4b, 0x30, 0x14, 0x6b, 0x73, 0x17, 0xd6, 0x63, 0xa4, 0x3c, 0x50, 0x45, 0xf8, - 0xca, 0x90, 0x2f, 0x1e, 0x23, 0x15, 0x09, 0x2e, 0x5f, 0x5c, 0x59, 0xb9, 0x9f, 0x45, 0x51, 0x76, - 0x5d, 0xe5, 0x67, 0x51, 0x64, 0xd6, 0xa1, 0x82, 0x2c, 0x9a, 0xf3, 0x80, 0xcc, 0xb2, 0x9b, 0x56, - 0xfc, 0x95, 0xed, 0x7c, 0xd0, 0xa0, 0x22, 0x93, 0x3d, 0x3b, 0xe7, 0x58, 0xb0, 0xfd, 0xf4, 0x9c, - 0xdc, 0x8d, 0x22, 0x96, 0x48, 0xcb, 0xbf, 0x44, 0xba, 0xf6, 0x7b, 
0xa4, 0xeb, 0x45, 0x90, 0x3a, - 0x4f, 0x61, 0x43, 0x56, 0xd3, 0xef, 0x1f, 0x16, 0x29, 0xc6, 0x99, 0xc2, 0x96, 0x82, 0xc1, 0x46, - 0x6d, 0x4a, 0x19, 0x2d, 0x44, 0xe4, 0x01, 0x6c, 0xb0, 0x73, 0x36, 0x1a, 0xac, 0xb0, 0x40, 0xba, - 0xb0, 0x0d, 0xa9, 0xd9, 0xeb, 0xfa, 0x86, 0xdc, 0xea, 0x51, 0xe7, 0x0d, 0x6c, 0x2f, 0x33, 0x65, - 0xdf, 0xc2, 0x7f, 0xcc, 0x75, 0xfb, 0x29, 0x9c, 0x7d, 0xf5, 0xc5, 0x1c, 0x91, 0x24, 0x2e, 0x96, - 0xd8, 0x69, 0x43, 0x55, 0x2a, 0xf8, 0x2c, 0x4e, 0xe6, 0x05, 0x25, 0xc6, 0xb0, 0x93, 0x8d, 0xbe, - 0xd5, 0xb8, 0x28, 0xc8, 0xe0, 0xe6, 0x10, 0xd2, 0x7f, 0x1e, 0x42, 0x9d, 0xa3, 0x8b, 0x4b, 0xab, - 0xf4, 0xf9, 0xd2, 0x2a, 0xbd, 0x4d, 0x2d, 0xed, 0x22, 0xb5, 0xb4, 0x4f, 0xa9, 0xa5, 0x7d, 0x4d, - 0x2d, 0xed, 0xfd, 0x77, 0x4b, 0x7b, 0xd1, 0xfa, 0x87, 0x7f, 0x9f, 0x27, 0xea, 0xe7, 0xb4, 0x74, - 0x5a, 0x1e, 0x1a, 0x59, 0x47, 0x3e, 0xfa, 0x11, 0x00, 0x00, 0xff, 0xff, 0xc5, 0x58, 0x0f, 0xec, - 0xbe, 0x06, 0x00, 0x00, -} - -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *TaskCreate) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - switch fieldpath[0] { - // unhandled: rootfs - // unhandled: pid - case "container_id": - return string(m.ContainerID), len(m.ContainerID) > 0 - case "bundle": - return string(m.Bundle), len(m.Bundle) > 0 - case "io": - // NOTE(stevvooe): This is probably not correct in many cases. - // We assume that the target message also implements the Field - // method, which isn't likely true in a lot of cases. - // - // If you have a broken build and have found this comment, - // you may be closer to a solution. - if m.IO == nil { - return "", false - } +// Deprecated: Use TaskDelete.ProtoReflect.Descriptor instead. +func (*TaskDelete) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_task_proto_rawDescGZIP(), []int{2} +} - return m.IO.Field(fieldpath[1:]) - case "checkpoint": - return string(m.Checkpoint), len(m.Checkpoint) > 0 +func (x *TaskDelete) GetContainerID() string { + if x != nil { + return x.ContainerID } - return "", false + return "" } -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *TaskStart) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false +func (x *TaskDelete) GetPid() uint32 { + if x != nil { + return x.Pid } + return 0 +} - switch fieldpath[0] { - // unhandled: pid - case "container_id": - return string(m.ContainerID), len(m.ContainerID) > 0 +func (x *TaskDelete) GetExitStatus() uint32 { + if x != nil { + return x.ExitStatus } - return "", false + return 0 } -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *TaskDelete) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false +func (x *TaskDelete) GetExitedAt() *timestamppb.Timestamp { + if x != nil { + return x.ExitedAt } + return nil +} - switch fieldpath[0] { - // unhandled: pid - // unhandled: exit_status - // unhandled: exited_at - case "container_id": - return string(m.ContainerID), len(m.ContainerID) > 0 - case "id": - return string(m.ID), len(m.ID) > 0 +func (x *TaskDelete) GetID() string { + if x != nil { + return x.ID } - return "", false + return "" } -// Field returns the value for the given fieldpath as a string, if defined. 
-// If the value is not defined, the second value will be false. -func (m *TaskIO) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } +type TaskIO struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields - switch fieldpath[0] { - case "stdin": - return string(m.Stdin), len(m.Stdin) > 0 - case "stdout": - return string(m.Stdout), len(m.Stdout) > 0 - case "stderr": - return string(m.Stderr), len(m.Stderr) > 0 - case "terminal": - return fmt.Sprint(m.Terminal), true - } - return "", false + Stdin string `protobuf:"bytes,1,opt,name=stdin,proto3" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,2,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,3,opt,name=stderr,proto3" json:"stderr,omitempty"` + Terminal bool `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"` } -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *TaskExit) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false +func (x *TaskIO) Reset() { + *x = TaskIO{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } +} - switch fieldpath[0] { - // unhandled: pid - // unhandled: exit_status - // unhandled: exited_at - case "container_id": - return string(m.ContainerID), len(m.ContainerID) > 0 - case "id": - return string(m.ID), len(m.ID) > 0 - } - return "", false +func (x *TaskIO) String() string { + return protoimpl.X.MessageStringOf(x) } -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *TaskOOM) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } +func (*TaskIO) ProtoMessage() {} - switch fieldpath[0] { - case "container_id": - return string(m.ContainerID), len(m.ContainerID) > 0 +func (x *TaskIO) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "", false + return mi.MessageOf(x) } -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *TaskExecAdded) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } +// Deprecated: Use TaskIO.ProtoReflect.Descriptor instead. +func (*TaskIO) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_task_proto_rawDescGZIP(), []int{3} +} - switch fieldpath[0] { - case "container_id": - return string(m.ContainerID), len(m.ContainerID) > 0 - case "exec_id": - return string(m.ExecID), len(m.ExecID) > 0 +func (x *TaskIO) GetStdin() string { + if x != nil { + return x.Stdin } - return "", false + return "" } -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. 
-func (m *TaskExecStarted) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false +func (x *TaskIO) GetStdout() string { + if x != nil { + return x.Stdout } + return "" +} - switch fieldpath[0] { - // unhandled: pid - case "container_id": - return string(m.ContainerID), len(m.ContainerID) > 0 - case "exec_id": - return string(m.ExecID), len(m.ExecID) > 0 +func (x *TaskIO) GetStderr() string { + if x != nil { + return x.Stderr } - return "", false + return "" } -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *TaskPaused) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false +func (x *TaskIO) GetTerminal() bool { + if x != nil { + return x.Terminal } + return false +} - switch fieldpath[0] { - case "container_id": - return string(m.ContainerID), len(m.ContainerID) > 0 - } - return "", false +type TaskExit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` + ExitStatus uint32 `protobuf:"varint,4,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=exited_at,json=exitedAt,proto3" json:"exited_at,omitempty"` } -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *TaskResumed) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false +func (x *TaskExit) Reset() { + *x = TaskExit{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } +} - switch fieldpath[0] { - case "container_id": - return string(m.ContainerID), len(m.ContainerID) > 0 - } - return "", false +func (x *TaskExit) String() string { + return protoimpl.X.MessageStringOf(x) } -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. 
-func (m *TaskCheckpointed) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } +func (*TaskExit) ProtoMessage() {} - switch fieldpath[0] { - case "container_id": - return string(m.ContainerID), len(m.ContainerID) > 0 - case "checkpoint": - return string(m.Checkpoint), len(m.Checkpoint) > 0 - } - return "", false -} -func (m *TaskCreate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *TaskExit) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *TaskCreate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use TaskExit.ProtoReflect.Descriptor instead. +func (*TaskExit) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_task_proto_rawDescGZIP(), []int{4} } -func (m *TaskCreate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Pid != 0 { - i = encodeVarintTask(dAtA, i, uint64(m.Pid)) - i-- - dAtA[i] = 0x30 - } - if len(m.Checkpoint) > 0 { - i -= len(m.Checkpoint) - copy(dAtA[i:], m.Checkpoint) - i = encodeVarintTask(dAtA, i, uint64(len(m.Checkpoint))) - i-- - dAtA[i] = 0x2a - } - if m.IO != nil { - { - size, err := m.IO.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTask(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if len(m.Rootfs) > 0 { - for iNdEx := len(m.Rootfs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Rootfs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTask(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Bundle) > 0 { - i -= len(m.Bundle) - copy(dAtA[i:], m.Bundle) - i = encodeVarintTask(dAtA, i, uint64(len(m.Bundle))) - i-- - dAtA[i] = 0x12 +func (x *TaskExit) GetContainerID() string { + if x != nil { + return x.ContainerID } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return "" } -func (m *TaskStart) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *TaskExit) GetID() string { + if x != nil { + return x.ID } - return dAtA[:n], nil + return "" } -func (m *TaskStart) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *TaskExit) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 } -func (m *TaskStart) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) +func (x *TaskExit) GetExitStatus() uint32 { + if x != nil { + return x.ExitStatus } - if m.Pid != 0 { - i = encodeVarintTask(dAtA, i, uint64(m.Pid)) - i-- - dAtA[i] = 
0x10 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return 0 } -func (m *TaskDelete) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *TaskExit) GetExitedAt() *timestamppb.Timestamp { + if x != nil { + return x.ExitedAt } - return dAtA[:n], nil + return nil } -func (m *TaskDelete) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} +type TaskOOM struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *TaskDelete) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0x2a - } - n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt):]) - if err2 != nil { - return 0, err2 - } - i -= n2 - i = encodeVarintTask(dAtA, i, uint64(n2)) - i-- - dAtA[i] = 0x22 - if m.ExitStatus != 0 { - i = encodeVarintTask(dAtA, i, uint64(m.ExitStatus)) - i-- - dAtA[i] = 0x18 - } - if m.Pid != 0 { - i = encodeVarintTask(dAtA, i, uint64(m.Pid)) - i-- - dAtA[i] = 0x10 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` } -func (m *TaskIO) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *TaskOOM) Reset() { + *x = TaskOOM{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *TaskIO) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *TaskOOM) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *TaskIO) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Terminal { - i-- - if m.Terminal { - dAtA[i] = 1 - } else { - dAtA[i] = 0 +func (*TaskOOM) ProtoMessage() {} + +func (x *TaskOOM) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - i-- - dAtA[i] = 0x20 - } - if len(m.Stderr) > 0 { - i -= len(m.Stderr) - copy(dAtA[i:], m.Stderr) - i = encodeVarintTask(dAtA, i, uint64(len(m.Stderr))) - i-- - dAtA[i] = 0x1a - } - if len(m.Stdout) > 0 { - i -= len(m.Stdout) - copy(dAtA[i:], m.Stdout) - i = encodeVarintTask(dAtA, i, uint64(len(m.Stdout))) - i-- - dAtA[i] 
= 0x12 + return ms } - if len(m.Stdin) > 0 { - i -= len(m.Stdin) - copy(dAtA[i:], m.Stdin) - i = encodeVarintTask(dAtA, i, uint64(len(m.Stdin))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return mi.MessageOf(x) } -func (m *TaskExit) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +// Deprecated: Use TaskOOM.ProtoReflect.Descriptor instead. +func (*TaskOOM) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_task_proto_rawDescGZIP(), []int{5} } -func (m *TaskExit) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *TaskOOM) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" } -func (m *TaskExit) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt):]) - if err3 != nil { - return 0, err3 - } - i -= n3 - i = encodeVarintTask(dAtA, i, uint64(n3)) - i-- - dAtA[i] = 0x2a - if m.ExitStatus != 0 { - i = encodeVarintTask(dAtA, i, uint64(m.ExitStatus)) - i-- - dAtA[i] = 0x20 - } - if m.Pid != 0 { - i = encodeVarintTask(dAtA, i, uint64(m.Pid)) - i-- - dAtA[i] = 0x18 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil +type TaskExecAdded struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` } -func (m *TaskOOM) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *TaskExecAdded) Reset() { + *x = TaskExecAdded{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *TaskOOM) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *TaskExecAdded) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *TaskOOM) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} +func (*TaskExecAdded) ProtoMessage() {} -func (m *TaskExecAdded) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - 
return nil, err +func (x *TaskExecAdded) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *TaskExecAdded) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use TaskExecAdded.ProtoReflect.Descriptor instead. +func (*TaskExecAdded) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_task_proto_rawDescGZIP(), []int{6} } -func (m *TaskExecAdded) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa +func (x *TaskExecAdded) GetContainerID() string { + if x != nil { + return x.ContainerID } - return len(dAtA) - i, nil + return "" } -func (m *TaskExecStarted) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *TaskExecAdded) GetExecID() string { + if x != nil { + return x.ExecID } - return dAtA[:n], nil + return "" } -func (m *TaskExecStarted) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} +type TaskExecStarted struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *TaskExecStarted) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Pid != 0 { - i = encodeVarintTask(dAtA, i, uint64(m.Pid)) - i-- - dAtA[i] = 0x18 - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` } -func (m *TaskPaused) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *TaskExecStarted) Reset() { + *x = TaskExecStarted{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *TaskPaused) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) +func (x *TaskExecStarted) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *TaskPaused) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} +func (*TaskExecStarted) ProtoMessage() {} -func (m *TaskResumed) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *TaskExecStarted) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *TaskResumed) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use TaskExecStarted.ProtoReflect.Descriptor instead. +func (*TaskExecStarted) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_task_proto_rawDescGZIP(), []int{7} } -func (m *TaskResumed) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) +func (x *TaskExecStarted) GetContainerID() string { + if x != nil { + return x.ContainerID } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa + return "" +} + +func (x *TaskExecStarted) GetExecID() string { + if x != nil { + return x.ExecID } - return len(dAtA) - i, nil + return "" } -func (m *TaskCheckpointed) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *TaskExecStarted) GetPid() uint32 { + if x != nil { + return x.Pid } - return dAtA[:n], nil + return 0 } -func (m *TaskCheckpointed) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +type TaskPaused struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` } -func (m *TaskCheckpointed) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) +func (x *TaskPaused) Reset() { + *x = TaskPaused{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - if len(m.Checkpoint) > 0 { - i -= len(m.Checkpoint) - copy(dAtA[i:], m.Checkpoint) - i = encodeVarintTask(dAtA, i, uint64(len(m.Checkpoint))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i 
= encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil } -func encodeVarintTask(dAtA []byte, offset int, v uint64) int { - offset -= sovTask(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base +func (x *TaskPaused) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *TaskCreate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - l = len(m.Bundle) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - if len(m.Rootfs) > 0 { - for _, e := range m.Rootfs { - l = e.Size() - n += 1 + l + sovTask(uint64(l)) + +func (*TaskPaused) ProtoMessage() {} + +func (x *TaskPaused) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } - if m.IO != nil { - l = m.IO.Size() - n += 1 + l + sovTask(uint64(l)) - } - l = len(m.Checkpoint) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - if m.Pid != 0 { - n += 1 + sovTask(uint64(m.Pid)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n + return mi.MessageOf(x) } -func (m *TaskStart) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - if m.Pid != 0 { - n += 1 + sovTask(uint64(m.Pid)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n +// Deprecated: Use TaskPaused.ProtoReflect.Descriptor instead. 
+func (*TaskPaused) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_task_proto_rawDescGZIP(), []int{8} } -func (m *TaskDelete) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - if m.Pid != 0 { - n += 1 + sovTask(uint64(m.Pid)) +func (x *TaskPaused) GetContainerID() string { + if x != nil { + return x.ContainerID } - if m.ExitStatus != 0 { - n += 1 + sovTask(uint64(m.ExitStatus)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt) - n += 1 + l + sovTask(uint64(l)) - l = len(m.ID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n + return "" } -func (m *TaskIO) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Stdin) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - l = len(m.Stdout) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - l = len(m.Stderr) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - if m.Terminal { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n +type TaskResumed struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` } -func (m *TaskExit) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - l = len(m.ID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - if m.Pid != 0 { - n += 1 + sovTask(uint64(m.Pid)) - } - if m.ExitStatus != 0 { - n += 1 + sovTask(uint64(m.ExitStatus)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt) - n += 1 + l + sovTask(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) +func (x *TaskResumed) Reset() { + *x = TaskResumed{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return n } -func (m *TaskOOM) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n +func (x *TaskResumed) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *TaskExecAdded) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) +func (*TaskResumed) ProtoMessage() {} + +func (x *TaskResumed) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return n + return mi.MessageOf(x) } -func (m *TaskExecStarted) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - if m.Pid != 0 { - n += 1 + sovTask(uint64(m.Pid)) - } - if 
m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n +// Deprecated: Use TaskResumed.ProtoReflect.Descriptor instead. +func (*TaskResumed) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_task_proto_rawDescGZIP(), []int{9} } -func (m *TaskPaused) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) +func (x *TaskResumed) GetContainerID() string { + if x != nil { + return x.ContainerID } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n + return "" } -func (m *TaskResumed) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n +type TaskCheckpointed struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + Checkpoint string `protobuf:"bytes,2,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"` } -func (m *TaskCheckpointed) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - l = len(m.Checkpoint) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) +func (x *TaskCheckpointed) Reset() { + *x = TaskCheckpointed{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovTask(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozTask(x uint64) (n int) { - return sovTask(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *TaskCreate) String() string { - if this == nil { - return "nil" - } - repeatedStringForRootfs := "[]*Mount{" - for _, f := range this.Rootfs { - repeatedStringForRootfs += strings.Replace(fmt.Sprintf("%v", f), "Mount", "types.Mount", 1) + "," - } - repeatedStringForRootfs += "}" - s := strings.Join([]string{`&TaskCreate{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `Bundle:` + fmt.Sprintf("%v", this.Bundle) + `,`, - `Rootfs:` + repeatedStringForRootfs + `,`, - `IO:` + strings.Replace(this.IO.String(), "TaskIO", "TaskIO", 1) + `,`, - `Checkpoint:` + fmt.Sprintf("%v", this.Checkpoint) + `,`, - `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *TaskStart) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TaskStart{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *TaskDelete) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TaskDelete{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, - `ExitedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExitedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`, - `ID:` + 
fmt.Sprintf("%v", this.ID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *TaskIO) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TaskIO{`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, - `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, - `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *TaskExit) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TaskExit{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, - `ExitedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExitedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *TaskOOM) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TaskOOM{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *TaskExecAdded) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TaskExecAdded{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *TaskExecStarted) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TaskExecStarted{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *TaskPaused) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TaskPaused{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *TaskResumed) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TaskResumed{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *TaskCheckpointed) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TaskCheckpointed{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `Checkpoint:` + fmt.Sprintf("%v", this.Checkpoint) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringTask(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *TaskCreate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TaskCreate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TaskCreate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Bundle", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Bundle = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rootfs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rootfs = append(m.Rootfs, &types.Mount{}) - if err := m.Rootfs[len(m.Rootfs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IO", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.IO == nil { - m.IO = &TaskIO{} - } - if err := m.IO.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Checkpoint", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 
0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Checkpoint = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) - } - m.Pid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTask(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTask - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TaskStart) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TaskStart: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TaskStart: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) - } - m.Pid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTask(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTask - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil } -func (m *TaskDelete) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TaskDelete: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TaskDelete: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) - } - m.Pid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) - } - m.ExitStatus = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExitStatus |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex 
- default: - iNdEx = preIndex - skippy, err := skipTask(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTask - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil +func (x *TaskCheckpointed) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *TaskIO) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TaskIO: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TaskIO: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdin = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdout = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stderr = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Terminal = 
bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipTask(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTask - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TaskExit) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TaskExit: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TaskExit: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) - } - m.Pid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) - } - m.ExitStatus = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExitStatus |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if 
msglen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTask(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTask - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TaskOOM) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TaskOOM: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TaskOOM: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTask(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTask - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TaskExecAdded) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TaskExecAdded: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TaskExecAdded: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTask(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTask - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TaskExecStarted) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TaskExecStarted: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TaskExecStarted: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) - } - m.Pid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTask(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTask - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TaskPaused) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TaskPaused: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TaskPaused: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTask(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTask - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } +func (*TaskCheckpointed) ProtoMessage() {} - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TaskResumed) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TaskResumed: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TaskResumed: illegal tag %d (wire type %d)", fieldNum, wire) +func (x *TaskCheckpointed) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_events_task_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - 
iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTask(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTask - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF + return ms } - return nil + return mi.MessageOf(x) } -func (m *TaskCheckpointed) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TaskCheckpointed: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TaskCheckpointed: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Checkpoint", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Checkpoint = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTask(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTask - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipTask(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTask - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTask - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTask - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthTask - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupTask - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthTask - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF +// Deprecated: Use TaskCheckpointed.ProtoReflect.Descriptor instead. +func (*TaskCheckpointed) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_events_task_proto_rawDescGZIP(), []int{10} +} + +func (x *TaskCheckpointed) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} + +func (x *TaskCheckpointed) GetCheckpoint() string { + if x != nil { + return x.Checkpoint + } + return "" +} + +var File_github_com_containerd_containerd_api_events_task_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_events_task_proto_rawDesc = []byte{ + 0x0a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x74, 0x61, + 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x36, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd5, 0x01, 0x0a, 0x0a, 0x54, 0x61, 0x73, 0x6b, 0x43, + 0x72, 0x65, 0x61, 
0x74, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x6e, 0x64, + 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, + 0x12, 0x2f, 0x0a, 0x06, 0x72, 0x6f, 0x6f, 0x74, 0x66, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x06, 0x72, 0x6f, 0x6f, 0x74, 0x66, + 0x73, 0x12, 0x29, 0x0a, 0x02, 0x69, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x73, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x4f, 0x52, 0x02, 0x69, 0x6f, 0x12, 0x1e, 0x0a, 0x0a, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x10, 0x0a, 0x03, + 0x70, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x22, 0x40, + 0x0a, 0x09, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x10, + 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, + 0x22, 0xab, 0x01, 0x0a, 0x0a, 0x54, 0x61, 0x73, 0x6b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, + 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x03, 0x70, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x69, 0x74, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x65, 0x78, 0x69, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x09, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x5f, + 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x6a, + 0x0a, 0x06, 0x54, 0x61, 0x73, 0x6b, 0x49, 0x4f, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x64, 0x69, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x64, 0x69, 0x6e, 0x12, 0x16, + 0x0a, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x64, 0x65, 0x72, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x64, 0x65, 0x72, 0x72, 0x12, 0x1a, + 0x0a, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x22, 0xa9, 0x01, 0x0a, 0x08, 0x54, + 0x61, 0x73, 0x6b, 0x45, 0x78, 0x69, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, + 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, + 0x65, 0x78, 0x69, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x0a, 0x65, 0x78, 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, + 0x09, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78, + 0x69, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x2c, 0x0a, 0x07, 0x54, 0x61, 0x73, 0x6b, 0x4f, 0x4f, + 0x4d, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x49, 0x64, 0x22, 0x4b, 0x0a, 0x0d, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, + 0x41, 0x64, 0x64, 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, + 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, + 0x64, 0x22, 0x5f, 0x0a, 0x0f, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x53, 0x74, 0x61, + 0x72, 0x74, 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, 0x64, + 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, + 0x69, 0x64, 0x22, 0x2f, 0x0a, 0x0a, 0x54, 0x61, 0x73, 0x6b, 0x50, 0x61, 0x75, 0x73, 0x65, 0x64, + 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x49, 0x64, 0x22, 0x30, 0x0a, 0x0b, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x75, 0x6d, + 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x49, 0x64, 0x22, 0x55, 0x0a, 0x10, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, + 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x42, 0x38, 0x5a, 0x32, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 
0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x3b, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x73, 0xa0, 0xf4, 0x1e, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( - ErrInvalidLengthTask = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTask = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupTask = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_events_task_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_events_task_proto_rawDescData = file_github_com_containerd_containerd_api_events_task_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_events_task_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_events_task_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_events_task_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_events_task_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_events_task_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_events_task_proto_msgTypes = make([]protoimpl.MessageInfo, 11) +var file_github_com_containerd_containerd_api_events_task_proto_goTypes = []interface{}{ + (*TaskCreate)(nil), // 0: containerd.events.TaskCreate + (*TaskStart)(nil), // 1: containerd.events.TaskStart + (*TaskDelete)(nil), // 2: containerd.events.TaskDelete + (*TaskIO)(nil), // 3: containerd.events.TaskIO + (*TaskExit)(nil), // 4: containerd.events.TaskExit + (*TaskOOM)(nil), // 5: containerd.events.TaskOOM + (*TaskExecAdded)(nil), // 6: containerd.events.TaskExecAdded + (*TaskExecStarted)(nil), // 7: containerd.events.TaskExecStarted + (*TaskPaused)(nil), // 8: containerd.events.TaskPaused + (*TaskResumed)(nil), // 9: containerd.events.TaskResumed + (*TaskCheckpointed)(nil), // 10: containerd.events.TaskCheckpointed + (*types.Mount)(nil), // 11: containerd.types.Mount + (*timestamppb.Timestamp)(nil), // 12: google.protobuf.Timestamp +} +var file_github_com_containerd_containerd_api_events_task_proto_depIdxs = []int32{ + 11, // 0: containerd.events.TaskCreate.rootfs:type_name -> containerd.types.Mount + 3, // 1: containerd.events.TaskCreate.io:type_name -> containerd.events.TaskIO + 12, // 2: containerd.events.TaskDelete.exited_at:type_name -> google.protobuf.Timestamp + 12, // 3: containerd.events.TaskExit.exited_at:type_name -> google.protobuf.Timestamp + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_events_task_proto_init() } +func file_github_com_containerd_containerd_api_events_task_proto_init() { + if File_github_com_containerd_containerd_api_events_task_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_events_task_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskCreate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_task_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskStart); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_task_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskDelete); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_task_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskIO); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_task_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskExit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_task_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskOOM); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_task_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskExecAdded); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_task_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskExecStarted); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_task_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskPaused); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_task_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskResumed); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_events_task_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskCheckpointed); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_events_task_proto_rawDesc, + NumEnums: 0, + NumMessages: 11, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_events_task_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_events_task_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_events_task_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_events_task_proto = out.File + file_github_com_containerd_containerd_api_events_task_proto_rawDesc = nil + file_github_com_containerd_containerd_api_events_task_proto_goTypes = nil + file_github_com_containerd_containerd_api_events_task_proto_depIdxs = nil 
+}
diff --git a/vendor/github.com/containerd/containerd/api/events/task.proto b/vendor/github.com/containerd/containerd/api/events/task.proto
index 3cbbbf00a03a7..238564dfe2d0d 100644
--- a/vendor/github.com/containerd/containerd/api/events/task.proto
+++ b/vendor/github.com/containerd/containerd/api/events/task.proto
@@ -18,10 +18,9 @@ syntax = "proto3";
 
 package containerd.events;
 
-import weak "gogoproto/gogo.proto";
 import "google/protobuf/timestamp.proto";
 import "github.com/containerd/containerd/api/types/mount.proto";
-import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
+import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
 
 option go_package = "github.com/containerd/containerd/api/events;events";
 option (containerd.plugin.fieldpath_all) = true;
@@ -30,7 +29,7 @@ message TaskCreate {
 	string container_id = 1;
 	string bundle = 2;
 	repeated containerd.types.Mount rootfs = 3;
-	TaskIO io = 4 [(gogoproto.customname) = "IO"];
+	TaskIO io = 4;
 	string checkpoint = 5;
 	uint32 pid = 6;
 }
@@ -44,7 +43,7 @@ message TaskDelete {
 	string container_id = 1;
 	uint32 pid = 2;
 	uint32 exit_status = 3;
-	google.protobuf.Timestamp exited_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+	google.protobuf.Timestamp exited_at = 4;
 	// id is the specific exec. By default if omitted will be `""` thus matches
 	// the init exec of the task matching `container_id`.
 	string id = 5;
@@ -62,7 +61,7 @@ message TaskExit {
 	string id = 2;
 	uint32 pid = 3;
 	uint32 exit_status = 4;
-	google.protobuf.Timestamp exited_at = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+	google.protobuf.Timestamp exited_at = 5;
 }
 
 message TaskOOM {
diff --git a/vendor/github.com/containerd/containerd/api/events/task_fieldpath.pb.go b/vendor/github.com/containerd/containerd/api/events/task_fieldpath.pb.go
new file mode 100644
index 0000000000000..633fc3a6534ac
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/events/task_fieldpath.pb.go
@@ -0,0 +1,191 @@
+// Code generated by protoc-gen-go-fieldpath. DO NOT EDIT.
+// source: github.com/containerd/containerd/api/events/task.proto
+package events
+
+import (
+	fmt "fmt"
+)
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *TaskCreate) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+	switch fieldpath[0] {
+	// unhandled: rootfs
+	// unhandled: pid
+	case "container_id":
+		return string(m.ContainerID), len(m.ContainerID) > 0
+	case "bundle":
+		return string(m.Bundle), len(m.Bundle) > 0
+	case "io":
+		// NOTE(stevvooe): This is probably not correct in many cases.
+		// We assume that the target message also implements the Field
+		// method, which isn't likely true in a lot of cases.
+		//
+		// If you have a broken build and have found this comment,
+		// you may be closer to a solution.
+		if m.IO == nil {
+			return "", false
+		}
+		return m.IO.Field(fieldpath[1:])
+	case "checkpoint":
+		return string(m.Checkpoint), len(m.Checkpoint) > 0
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *TaskStart) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+	switch fieldpath[0] {
+	// unhandled: pid
+	case "container_id":
+		return string(m.ContainerID), len(m.ContainerID) > 0
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *TaskDelete) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+	switch fieldpath[0] {
+	// unhandled: pid
+	// unhandled: exit_status
+	// unhandled: exited_at
+	case "container_id":
+		return string(m.ContainerID), len(m.ContainerID) > 0
+	case "id":
+		return string(m.ID), len(m.ID) > 0
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *TaskIO) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+	switch fieldpath[0] {
+	case "stdin":
+		return string(m.Stdin), len(m.Stdin) > 0
+	case "stdout":
+		return string(m.Stdout), len(m.Stdout) > 0
+	case "stderr":
+		return string(m.Stderr), len(m.Stderr) > 0
+	case "terminal":
+		return fmt.Sprint(m.Terminal), true
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *TaskExit) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+	switch fieldpath[0] {
+	// unhandled: pid
+	// unhandled: exit_status
+	// unhandled: exited_at
+	case "container_id":
+		return string(m.ContainerID), len(m.ContainerID) > 0
+	case "id":
+		return string(m.ID), len(m.ID) > 0
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *TaskOOM) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+	switch fieldpath[0] {
+	case "container_id":
+		return string(m.ContainerID), len(m.ContainerID) > 0
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *TaskExecAdded) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+	switch fieldpath[0] {
+	case "container_id":
+		return string(m.ContainerID), len(m.ContainerID) > 0
+	case "exec_id":
+		return string(m.ExecID), len(m.ExecID) > 0
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *TaskExecStarted) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+	switch fieldpath[0] {
+	// unhandled: pid
+	case "container_id":
+		return string(m.ContainerID), len(m.ContainerID) > 0
+	case "exec_id":
+		return string(m.ExecID), len(m.ExecID) > 0
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *TaskPaused) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+	switch fieldpath[0] {
+	case "container_id":
+		return string(m.ContainerID), len(m.ContainerID) > 0
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *TaskResumed) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+	switch fieldpath[0] {
+	case "container_id":
+		return string(m.ContainerID), len(m.ContainerID) > 0
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *TaskCheckpointed) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+	switch fieldpath[0] {
+	case "container_id":
+		return string(m.ContainerID), len(m.ContainerID) > 0
+	case "checkpoint":
+		return string(m.Checkpoint), len(m.Checkpoint) > 0
+	}
+	return "", false
+}
diff --git a/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/doc.go b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/doc.go
new file mode 100644
index 0000000000000..f960350c1613c
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/doc.go
@@ -0,0 +1,17 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package sandbox
diff --git a/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.pb.go b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.pb.go
new file mode 100644
index 0000000000000..373ecd24b1ea6
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.pb.go
@@ -0,0 +1,1470 @@
+//
+//Copyright The containerd Authors.
+//
+//Licensed under the Apache License, Version 2.0 (the "License");
+//you may not use this file except in compliance with the License.
+//You may obtain a copy of the License at
+//
+//http://www.apache.org/licenses/LICENSE-2.0
+//
+//Unless required by applicable law or agreed to in writing, software
+//distributed under the License is distributed on an "AS IS" BASIS,
+//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//See the License for the specific language governing permissions and
+//limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 +// source: github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.proto + +package sandbox + +import ( + types "github.com/containerd/containerd/api/types" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CreateSandboxRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + BundlePath string `protobuf:"bytes,2,opt,name=bundle_path,json=bundlePath,proto3" json:"bundle_path,omitempty"` + Rootfs []*types.Mount `protobuf:"bytes,3,rep,name=rootfs,proto3" json:"rootfs,omitempty"` + Options *anypb.Any `protobuf:"bytes,4,opt,name=options,proto3" json:"options,omitempty"` +} + +func (x *CreateSandboxRequest) Reset() { + *x = CreateSandboxRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateSandboxRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateSandboxRequest) ProtoMessage() {} + +func (x *CreateSandboxRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateSandboxRequest.ProtoReflect.Descriptor instead. 
+func (*CreateSandboxRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{0} +} + +func (x *CreateSandboxRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +func (x *CreateSandboxRequest) GetBundlePath() string { + if x != nil { + return x.BundlePath + } + return "" +} + +func (x *CreateSandboxRequest) GetRootfs() []*types.Mount { + if x != nil { + return x.Rootfs + } + return nil +} + +func (x *CreateSandboxRequest) GetOptions() *anypb.Any { + if x != nil { + return x.Options + } + return nil +} + +type CreateSandboxResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CreateSandboxResponse) Reset() { + *x = CreateSandboxResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateSandboxResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateSandboxResponse) ProtoMessage() {} + +func (x *CreateSandboxResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateSandboxResponse.ProtoReflect.Descriptor instead. +func (*CreateSandboxResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{1} +} + +type StartSandboxRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` +} + +func (x *StartSandboxRequest) Reset() { + *x = StartSandboxRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StartSandboxRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartSandboxRequest) ProtoMessage() {} + +func (x *StartSandboxRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartSandboxRequest.ProtoReflect.Descriptor instead. 
+func (*StartSandboxRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{2} +} + +func (x *StartSandboxRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +type StartSandboxResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` +} + +func (x *StartSandboxResponse) Reset() { + *x = StartSandboxResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StartSandboxResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartSandboxResponse) ProtoMessage() {} + +func (x *StartSandboxResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartSandboxResponse.ProtoReflect.Descriptor instead. +func (*StartSandboxResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{3} +} + +func (x *StartSandboxResponse) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} + +func (x *StartSandboxResponse) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil +} + +type PlatformRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` +} + +func (x *PlatformRequest) Reset() { + *x = PlatformRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PlatformRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PlatformRequest) ProtoMessage() {} + +func (x *PlatformRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PlatformRequest.ProtoReflect.Descriptor instead. 
+func (*PlatformRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{4} +} + +func (x *PlatformRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +type PlatformResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Platform *types.Platform `protobuf:"bytes,1,opt,name=platform,proto3" json:"platform,omitempty"` +} + +func (x *PlatformResponse) Reset() { + *x = PlatformResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PlatformResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PlatformResponse) ProtoMessage() {} + +func (x *PlatformResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PlatformResponse.ProtoReflect.Descriptor instead. +func (*PlatformResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{5} +} + +func (x *PlatformResponse) GetPlatform() *types.Platform { + if x != nil { + return x.Platform + } + return nil +} + +type StopSandboxRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + TimeoutSecs uint32 `protobuf:"varint,2,opt,name=timeout_secs,json=timeoutSecs,proto3" json:"timeout_secs,omitempty"` +} + +func (x *StopSandboxRequest) Reset() { + *x = StopSandboxRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StopSandboxRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StopSandboxRequest) ProtoMessage() {} + +func (x *StopSandboxRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StopSandboxRequest.ProtoReflect.Descriptor instead. 
+func (*StopSandboxRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{6} +} + +func (x *StopSandboxRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +func (x *StopSandboxRequest) GetTimeoutSecs() uint32 { + if x != nil { + return x.TimeoutSecs + } + return 0 +} + +type StopSandboxResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *StopSandboxResponse) Reset() { + *x = StopSandboxResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StopSandboxResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StopSandboxResponse) ProtoMessage() {} + +func (x *StopSandboxResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StopSandboxResponse.ProtoReflect.Descriptor instead. +func (*StopSandboxResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{7} +} + +type UpdateSandboxRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + Resources *anypb.Any `protobuf:"bytes,2,opt,name=resources,proto3" json:"resources,omitempty"` + Annotations map[string]string `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *UpdateSandboxRequest) Reset() { + *x = UpdateSandboxRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateSandboxRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateSandboxRequest) ProtoMessage() {} + +func (x *UpdateSandboxRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateSandboxRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateSandboxRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{8} +} + +func (x *UpdateSandboxRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +func (x *UpdateSandboxRequest) GetResources() *anypb.Any { + if x != nil { + return x.Resources + } + return nil +} + +func (x *UpdateSandboxRequest) GetAnnotations() map[string]string { + if x != nil { + return x.Annotations + } + return nil +} + +type WaitSandboxRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` +} + +func (x *WaitSandboxRequest) Reset() { + *x = WaitSandboxRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WaitSandboxRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WaitSandboxRequest) ProtoMessage() {} + +func (x *WaitSandboxRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WaitSandboxRequest.ProtoReflect.Descriptor instead. +func (*WaitSandboxRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{9} +} + +func (x *WaitSandboxRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +type WaitSandboxResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ExitStatus uint32 `protobuf:"varint,1,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=exited_at,json=exitedAt,proto3" json:"exited_at,omitempty"` +} + +func (x *WaitSandboxResponse) Reset() { + *x = WaitSandboxResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WaitSandboxResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WaitSandboxResponse) ProtoMessage() {} + +func (x *WaitSandboxResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WaitSandboxResponse.ProtoReflect.Descriptor instead. 
+func (*WaitSandboxResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{10} +} + +func (x *WaitSandboxResponse) GetExitStatus() uint32 { + if x != nil { + return x.ExitStatus + } + return 0 +} + +func (x *WaitSandboxResponse) GetExitedAt() *timestamppb.Timestamp { + if x != nil { + return x.ExitedAt + } + return nil +} + +type UpdateSandboxResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *UpdateSandboxResponse) Reset() { + *x = UpdateSandboxResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateSandboxResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateSandboxResponse) ProtoMessage() {} + +func (x *UpdateSandboxResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateSandboxResponse.ProtoReflect.Descriptor instead. +func (*UpdateSandboxResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{11} +} + +type SandboxStatusRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + Verbose bool `protobuf:"varint,2,opt,name=verbose,proto3" json:"verbose,omitempty"` +} + +func (x *SandboxStatusRequest) Reset() { + *x = SandboxStatusRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SandboxStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SandboxStatusRequest) ProtoMessage() {} + +func (x *SandboxStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SandboxStatusRequest.ProtoReflect.Descriptor instead. 
+func (*SandboxStatusRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{12} +} + +func (x *SandboxStatusRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +func (x *SandboxStatusRequest) GetVerbose() bool { + if x != nil { + return x.Verbose + } + return false +} + +type SandboxStatusResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` + State string `protobuf:"bytes,3,opt,name=state,proto3" json:"state,omitempty"` + Info map[string]string `protobuf:"bytes,4,rep,name=info,proto3" json:"info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + ExitedAt *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=exited_at,json=exitedAt,proto3" json:"exited_at,omitempty"` + Extra *anypb.Any `protobuf:"bytes,7,opt,name=extra,proto3" json:"extra,omitempty"` +} + +func (x *SandboxStatusResponse) Reset() { + *x = SandboxStatusResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SandboxStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SandboxStatusResponse) ProtoMessage() {} + +func (x *SandboxStatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SandboxStatusResponse.ProtoReflect.Descriptor instead. 
+func (*SandboxStatusResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{13} +} + +func (x *SandboxStatusResponse) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +func (x *SandboxStatusResponse) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} + +func (x *SandboxStatusResponse) GetState() string { + if x != nil { + return x.State + } + return "" +} + +func (x *SandboxStatusResponse) GetInfo() map[string]string { + if x != nil { + return x.Info + } + return nil +} + +func (x *SandboxStatusResponse) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil +} + +func (x *SandboxStatusResponse) GetExitedAt() *timestamppb.Timestamp { + if x != nil { + return x.ExitedAt + } + return nil +} + +func (x *SandboxStatusResponse) GetExtra() *anypb.Any { + if x != nil { + return x.Extra + } + return nil +} + +type PingRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` +} + +func (x *PingRequest) Reset() { + *x = PingRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PingRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PingRequest) ProtoMessage() {} + +func (x *PingRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PingRequest.ProtoReflect.Descriptor instead. +func (*PingRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{14} +} + +func (x *PingRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +type PingResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PingResponse) Reset() { + *x = PingResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PingResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PingResponse) ProtoMessage() {} + +func (x *PingResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PingResponse.ProtoReflect.Descriptor instead. 
+func (*PingResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{15} +} + +type ShutdownSandboxRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` +} + +func (x *ShutdownSandboxRequest) Reset() { + *x = ShutdownSandboxRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ShutdownSandboxRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ShutdownSandboxRequest) ProtoMessage() {} + +func (x *ShutdownSandboxRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ShutdownSandboxRequest.ProtoReflect.Descriptor instead. +func (*ShutdownSandboxRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{16} +} + +func (x *ShutdownSandboxRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +type ShutdownSandboxResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ShutdownSandboxResponse) Reset() { + *x = ShutdownSandboxResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ShutdownSandboxResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ShutdownSandboxResponse) ProtoMessage() {} + +func (x *ShutdownSandboxResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ShutdownSandboxResponse.ProtoReflect.Descriptor instead. 
+func (*ShutdownSandboxResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{17} +} + +var File_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDesc = []byte{ + 0x0a, 0x45, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2f, 0x73, + 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, + 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, + 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6d, + 0x6f, 0x75, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x39, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb7, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, + 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x1f, 0x0a, + 0x0b, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x2f, + 0x0a, 0x06, 0x72, 0x6f, 0x6f, 0x74, 0x66, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x06, 0x72, 0x6f, 0x6f, 0x74, 0x66, 0x73, 0x12, + 0x2e, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, + 0x17, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x34, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x72, + 0x74, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1d, 0x0a, 0x0a, 0x73, 0x61, 
0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x63, + 0x0a, 0x14, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x41, 0x74, 0x22, 0x30, 0x0a, 0x0f, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, + 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, + 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x4a, 0x0a, 0x10, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, + 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x08, 0x70, 0x6c, 0x61, + 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, + 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, + 0x6d, 0x22, 0x56, 0x0a, 0x12, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, + 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, + 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x5f, 0x73, 0x65, 0x63, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x74, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x73, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x74, 0x6f, + 0x70, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x91, 0x02, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x61, 0x6e, 0x64, 0x62, + 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, + 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, + 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x32, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, + 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x66, 0x0a, 0x0b, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x44, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, + 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 
0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x33, 0x0a, 0x12, 0x57, 0x61, 0x69, 0x74, 0x53, 0x61, 0x6e, 0x64, + 0x62, 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, + 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x6f, 0x0a, 0x13, 0x57, 0x61, 0x69, + 0x74, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x69, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x65, 0x78, 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x37, 0x0a, 0x09, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x08, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x17, 0x0a, 0x15, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x4f, 0x0a, 0x14, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, + 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, + 0x72, 0x62, 0x6f, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x76, 0x65, 0x72, + 0x62, 0x6f, 0x73, 0x65, 0x22, 0x8b, 0x03, 0x0a, 0x15, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, + 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x10, 0x0a, + 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, + 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x52, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x64, 0x41, 0x74, 0x12, 0x37, 0x0a, 0x09, 0x65, 0x78, 0x69, 0x74, 0x65, 
0x64, 0x5f, 0x61, + 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x2a, 0x0a, + 0x05, 0x65, 0x78, 0x74, 0x72, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, + 0x6e, 0x79, 0x52, 0x05, 0x65, 0x78, 0x74, 0x72, 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x49, 0x6e, 0x66, + 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0x2c, 0x0a, 0x0b, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, + 0x22, 0x0e, 0x0a, 0x0c, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x37, 0x0a, 0x16, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x53, 0x61, 0x6e, 0x64, + 0x62, 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, + 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x19, 0x0a, 0x17, 0x53, 0x68, 0x75, + 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xbe, 0x07, 0x0a, 0x07, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, + 0x12, 0x7a, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, + 0x78, 0x12, 0x33, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, + 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, + 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x61, 0x6e, + 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, 0x0c, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x12, 0x32, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, + 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, + 0x72, 0x74, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x33, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, + 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6b, 0x0a, 0x08, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, + 0x6d, 0x12, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, + 0x75, 
0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, + 0x31, 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, + 0x31, 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x74, 0x0a, 0x0b, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, + 0x78, 0x12, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, + 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x74, 0x0a, 0x0b, 0x57, 0x61, 0x69, 0x74, + 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x12, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, + 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x61, 0x69, 0x74, 0x53, 0x61, 0x6e, 0x64, + 0x62, 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, + 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x61, 0x69, 0x74, 0x53, + 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7a, + 0x0a, 0x0d, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, + 0x33, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, + 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x0b, 0x50, 0x69, + 0x6e, 0x67, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x12, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, + 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, + 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x80, 0x01, 0x0a, 0x0f, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x53, + 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x12, 0x35, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x72, 
0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, + 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x53, + 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, + 0x6d, 0x65, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x68, + 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x41, 0x5a, 0x3f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2f, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2f, 0x76, 0x31, + 0x3b, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescData = file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDesc +) + +func file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes = make([]protoimpl.MessageInfo, 20) +var file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_goTypes = []interface{}{ + (*CreateSandboxRequest)(nil), // 0: containerd.runtime.sandbox.v1.CreateSandboxRequest + (*CreateSandboxResponse)(nil), // 1: containerd.runtime.sandbox.v1.CreateSandboxResponse + (*StartSandboxRequest)(nil), // 2: containerd.runtime.sandbox.v1.StartSandboxRequest + (*StartSandboxResponse)(nil), // 3: containerd.runtime.sandbox.v1.StartSandboxResponse + (*PlatformRequest)(nil), // 4: containerd.runtime.sandbox.v1.PlatformRequest + (*PlatformResponse)(nil), // 5: containerd.runtime.sandbox.v1.PlatformResponse + (*StopSandboxRequest)(nil), // 6: containerd.runtime.sandbox.v1.StopSandboxRequest + (*StopSandboxResponse)(nil), // 7: containerd.runtime.sandbox.v1.StopSandboxResponse + (*UpdateSandboxRequest)(nil), // 8: containerd.runtime.sandbox.v1.UpdateSandboxRequest + (*WaitSandboxRequest)(nil), // 9: containerd.runtime.sandbox.v1.WaitSandboxRequest + (*WaitSandboxResponse)(nil), // 10: containerd.runtime.sandbox.v1.WaitSandboxResponse + (*UpdateSandboxResponse)(nil), // 11: containerd.runtime.sandbox.v1.UpdateSandboxResponse + (*SandboxStatusRequest)(nil), // 12: containerd.runtime.sandbox.v1.SandboxStatusRequest + (*SandboxStatusResponse)(nil), // 13: containerd.runtime.sandbox.v1.SandboxStatusResponse + (*PingRequest)(nil), // 14: containerd.runtime.sandbox.v1.PingRequest + (*PingResponse)(nil), // 15: containerd.runtime.sandbox.v1.PingResponse + (*ShutdownSandboxRequest)(nil), // 16: containerd.runtime.sandbox.v1.ShutdownSandboxRequest + 
(*ShutdownSandboxResponse)(nil), // 17: containerd.runtime.sandbox.v1.ShutdownSandboxResponse + nil, // 18: containerd.runtime.sandbox.v1.UpdateSandboxRequest.AnnotationsEntry + nil, // 19: containerd.runtime.sandbox.v1.SandboxStatusResponse.InfoEntry + (*types.Mount)(nil), // 20: containerd.types.Mount + (*anypb.Any)(nil), // 21: google.protobuf.Any + (*timestamppb.Timestamp)(nil), // 22: google.protobuf.Timestamp + (*types.Platform)(nil), // 23: containerd.types.Platform +} +var file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_depIdxs = []int32{ + 20, // 0: containerd.runtime.sandbox.v1.CreateSandboxRequest.rootfs:type_name -> containerd.types.Mount + 21, // 1: containerd.runtime.sandbox.v1.CreateSandboxRequest.options:type_name -> google.protobuf.Any + 22, // 2: containerd.runtime.sandbox.v1.StartSandboxResponse.created_at:type_name -> google.protobuf.Timestamp + 23, // 3: containerd.runtime.sandbox.v1.PlatformResponse.platform:type_name -> containerd.types.Platform + 21, // 4: containerd.runtime.sandbox.v1.UpdateSandboxRequest.resources:type_name -> google.protobuf.Any + 18, // 5: containerd.runtime.sandbox.v1.UpdateSandboxRequest.annotations:type_name -> containerd.runtime.sandbox.v1.UpdateSandboxRequest.AnnotationsEntry + 22, // 6: containerd.runtime.sandbox.v1.WaitSandboxResponse.exited_at:type_name -> google.protobuf.Timestamp + 19, // 7: containerd.runtime.sandbox.v1.SandboxStatusResponse.info:type_name -> containerd.runtime.sandbox.v1.SandboxStatusResponse.InfoEntry + 22, // 8: containerd.runtime.sandbox.v1.SandboxStatusResponse.created_at:type_name -> google.protobuf.Timestamp + 22, // 9: containerd.runtime.sandbox.v1.SandboxStatusResponse.exited_at:type_name -> google.protobuf.Timestamp + 21, // 10: containerd.runtime.sandbox.v1.SandboxStatusResponse.extra:type_name -> google.protobuf.Any + 0, // 11: containerd.runtime.sandbox.v1.Sandbox.CreateSandbox:input_type -> containerd.runtime.sandbox.v1.CreateSandboxRequest + 2, // 12: containerd.runtime.sandbox.v1.Sandbox.StartSandbox:input_type -> containerd.runtime.sandbox.v1.StartSandboxRequest + 4, // 13: containerd.runtime.sandbox.v1.Sandbox.Platform:input_type -> containerd.runtime.sandbox.v1.PlatformRequest + 6, // 14: containerd.runtime.sandbox.v1.Sandbox.StopSandbox:input_type -> containerd.runtime.sandbox.v1.StopSandboxRequest + 9, // 15: containerd.runtime.sandbox.v1.Sandbox.WaitSandbox:input_type -> containerd.runtime.sandbox.v1.WaitSandboxRequest + 12, // 16: containerd.runtime.sandbox.v1.Sandbox.SandboxStatus:input_type -> containerd.runtime.sandbox.v1.SandboxStatusRequest + 14, // 17: containerd.runtime.sandbox.v1.Sandbox.PingSandbox:input_type -> containerd.runtime.sandbox.v1.PingRequest + 16, // 18: containerd.runtime.sandbox.v1.Sandbox.ShutdownSandbox:input_type -> containerd.runtime.sandbox.v1.ShutdownSandboxRequest + 1, // 19: containerd.runtime.sandbox.v1.Sandbox.CreateSandbox:output_type -> containerd.runtime.sandbox.v1.CreateSandboxResponse + 3, // 20: containerd.runtime.sandbox.v1.Sandbox.StartSandbox:output_type -> containerd.runtime.sandbox.v1.StartSandboxResponse + 5, // 21: containerd.runtime.sandbox.v1.Sandbox.Platform:output_type -> containerd.runtime.sandbox.v1.PlatformResponse + 7, // 22: containerd.runtime.sandbox.v1.Sandbox.StopSandbox:output_type -> containerd.runtime.sandbox.v1.StopSandboxResponse + 10, // 23: containerd.runtime.sandbox.v1.Sandbox.WaitSandbox:output_type -> containerd.runtime.sandbox.v1.WaitSandboxResponse + 13, // 24: 
containerd.runtime.sandbox.v1.Sandbox.SandboxStatus:output_type -> containerd.runtime.sandbox.v1.SandboxStatusResponse + 15, // 25: containerd.runtime.sandbox.v1.Sandbox.PingSandbox:output_type -> containerd.runtime.sandbox.v1.PingResponse + 17, // 26: containerd.runtime.sandbox.v1.Sandbox.ShutdownSandbox:output_type -> containerd.runtime.sandbox.v1.ShutdownSandboxResponse + 19, // [19:27] is the sub-list for method output_type + 11, // [11:19] is the sub-list for method input_type + 11, // [11:11] is the sub-list for extension type_name + 11, // [11:11] is the sub-list for extension extendee + 0, // [0:11] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_init() } +func file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_init() { + if File_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateSandboxRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateSandboxResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StartSandboxRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StartSandboxResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlatformRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PlatformResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StopSandboxRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StopSandboxResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateSandboxRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WaitSandboxRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WaitSandboxResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateSandboxResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SandboxStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SandboxStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PingRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PingResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShutdownSandboxRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShutdownSandboxResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDesc, + NumEnums: 0, + NumMessages: 20, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_goTypes, + 
DependencyIndexes: file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto = out.File + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_rawDesc = nil + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_goTypes = nil + file_github_com_containerd_containerd_api_runtime_sandbox_v1_sandbox_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.proto b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.proto new file mode 100644 index 0000000000000..a33c9fc8fdf62 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.proto @@ -0,0 +1,135 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +syntax = "proto3"; + +package containerd.runtime.sandbox.v1; + +import "google/protobuf/any.proto"; +import "google/protobuf/timestamp.proto"; + +import "github.com/containerd/containerd/api/types/mount.proto"; +import "github.com/containerd/containerd/api/types/platform.proto"; + +option go_package = "github.com/containerd/containerd/api/runtime/sandbox/v1;sandbox"; + +// Sandbox is an optional interface that shim may implement to support sandboxes environments. +// A typical example of sandbox is microVM or pause container - an entity that groups containers and/or +// holds resources relevant for this group. +service Sandbox { + // CreateSandbox will be called right after sandbox shim instance launched. + // It is a good place to initialize sandbox environment. + rpc CreateSandbox(CreateSandboxRequest) returns (CreateSandboxResponse); + + // StartSandbox will start previsouly created sandbox. + rpc StartSandbox(StartSandboxRequest) returns (StartSandboxResponse); + + // Platform queries the platform the sandbox is going to run containers on. + // containerd will use this to generate a proper OCI spec. + rpc Platform(PlatformRequest) returns (PlatformResponse); + + // StopSandbox will stop existing sandbox instance + rpc StopSandbox(StopSandboxRequest) returns (StopSandboxResponse); + + // WaitSandbox blocks until sanbox exits. + rpc WaitSandbox(WaitSandboxRequest) returns (WaitSandboxResponse); + + // SandboxStatus will return current status of the running sandbox instance + rpc SandboxStatus(SandboxStatusRequest) returns (SandboxStatusResponse); + + // PingSandbox is a lightweight API call to check whether sandbox alive. + rpc PingSandbox(PingRequest) returns (PingResponse); + + // ShutdownSandbox must shutdown shim instance. 
+ rpc ShutdownSandbox(ShutdownSandboxRequest) returns (ShutdownSandboxResponse); +} + +message CreateSandboxRequest { + string sandbox_id = 1; + string bundle_path = 2; + repeated containerd.types.Mount rootfs = 3; + google.protobuf.Any options = 4; +} + +message CreateSandboxResponse {} + +message StartSandboxRequest { + string sandbox_id = 1; +} + +message StartSandboxResponse { + uint32 pid = 1; + google.protobuf.Timestamp created_at = 2; +} + +message PlatformRequest { + string sandbox_id = 1; +} + +message PlatformResponse { + containerd.types.Platform platform = 1; +} + +message StopSandboxRequest { + string sandbox_id = 1; + uint32 timeout_secs = 2; +} + +message StopSandboxResponse {} + +message UpdateSandboxRequest { + string sandbox_id = 1; + google.protobuf.Any resources = 2; + map<string, string> annotations = 3; +} + +message WaitSandboxRequest { + string sandbox_id = 1; +} + +message WaitSandboxResponse { + uint32 exit_status = 1; + google.protobuf.Timestamp exited_at = 2; +} + +message UpdateSandboxResponse {} + +message SandboxStatusRequest { + string sandbox_id = 1; + bool verbose = 2; +} + +message SandboxStatusResponse { + string sandbox_id = 1; + uint32 pid = 2; + string state = 3; + map<string, string> info = 4; + google.protobuf.Timestamp created_at = 5; + google.protobuf.Timestamp exited_at = 6; + google.protobuf.Any extra = 7; +} + +message PingRequest { + string sandbox_id = 1; +} + +message PingResponse {} + +message ShutdownSandboxRequest { + string sandbox_id = 1; +} + +message ShutdownSandboxResponse {} diff --git a/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox_grpc.pb.go b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox_grpc.pb.go new file mode 100644 index 0000000000000..f7942498610ff --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox_grpc.pb.go @@ -0,0 +1,377 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.1 +// source: github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.proto + +package sandbox + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// SandboxClient is the client API for Sandbox service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type SandboxClient interface { + // CreateSandbox will be called right after sandbox shim instance launched. + // It is a good place to initialize sandbox environment. + CreateSandbox(ctx context.Context, in *CreateSandboxRequest, opts ...grpc.CallOption) (*CreateSandboxResponse, error) + // StartSandbox will start previsouly created sandbox. + StartSandbox(ctx context.Context, in *StartSandboxRequest, opts ...grpc.CallOption) (*StartSandboxResponse, error) + // Platform queries the platform the sandbox is going to run containers on. + // containerd will use this to generate a proper OCI spec.
+ Platform(ctx context.Context, in *PlatformRequest, opts ...grpc.CallOption) (*PlatformResponse, error) + // StopSandbox will stop existing sandbox instance + StopSandbox(ctx context.Context, in *StopSandboxRequest, opts ...grpc.CallOption) (*StopSandboxResponse, error) + // WaitSandbox blocks until sanbox exits. + WaitSandbox(ctx context.Context, in *WaitSandboxRequest, opts ...grpc.CallOption) (*WaitSandboxResponse, error) + // SandboxStatus will return current status of the running sandbox instance + SandboxStatus(ctx context.Context, in *SandboxStatusRequest, opts ...grpc.CallOption) (*SandboxStatusResponse, error) + // PingSandbox is a lightweight API call to check whether sandbox alive. + PingSandbox(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error) + // ShutdownSandbox must shutdown shim instance. + ShutdownSandbox(ctx context.Context, in *ShutdownSandboxRequest, opts ...grpc.CallOption) (*ShutdownSandboxResponse, error) +} + +type sandboxClient struct { + cc grpc.ClientConnInterface +} + +func NewSandboxClient(cc grpc.ClientConnInterface) SandboxClient { + return &sandboxClient{cc} +} + +func (c *sandboxClient) CreateSandbox(ctx context.Context, in *CreateSandboxRequest, opts ...grpc.CallOption) (*CreateSandboxResponse, error) { + out := new(CreateSandboxResponse) + err := c.cc.Invoke(ctx, "/containerd.runtime.sandbox.v1.Sandbox/CreateSandbox", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *sandboxClient) StartSandbox(ctx context.Context, in *StartSandboxRequest, opts ...grpc.CallOption) (*StartSandboxResponse, error) { + out := new(StartSandboxResponse) + err := c.cc.Invoke(ctx, "/containerd.runtime.sandbox.v1.Sandbox/StartSandbox", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *sandboxClient) Platform(ctx context.Context, in *PlatformRequest, opts ...grpc.CallOption) (*PlatformResponse, error) { + out := new(PlatformResponse) + err := c.cc.Invoke(ctx, "/containerd.runtime.sandbox.v1.Sandbox/Platform", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *sandboxClient) StopSandbox(ctx context.Context, in *StopSandboxRequest, opts ...grpc.CallOption) (*StopSandboxResponse, error) { + out := new(StopSandboxResponse) + err := c.cc.Invoke(ctx, "/containerd.runtime.sandbox.v1.Sandbox/StopSandbox", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *sandboxClient) WaitSandbox(ctx context.Context, in *WaitSandboxRequest, opts ...grpc.CallOption) (*WaitSandboxResponse, error) { + out := new(WaitSandboxResponse) + err := c.cc.Invoke(ctx, "/containerd.runtime.sandbox.v1.Sandbox/WaitSandbox", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *sandboxClient) SandboxStatus(ctx context.Context, in *SandboxStatusRequest, opts ...grpc.CallOption) (*SandboxStatusResponse, error) { + out := new(SandboxStatusResponse) + err := c.cc.Invoke(ctx, "/containerd.runtime.sandbox.v1.Sandbox/SandboxStatus", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *sandboxClient) PingSandbox(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PingResponse, error) { + out := new(PingResponse) + err := c.cc.Invoke(ctx, "/containerd.runtime.sandbox.v1.Sandbox/PingSandbox", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *sandboxClient) ShutdownSandbox(ctx context.Context, in *ShutdownSandboxRequest, opts ...grpc.CallOption) (*ShutdownSandboxResponse, error) { + out := new(ShutdownSandboxResponse) + err := c.cc.Invoke(ctx, "/containerd.runtime.sandbox.v1.Sandbox/ShutdownSandbox", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SandboxServer is the server API for Sandbox service. +// All implementations must embed UnimplementedSandboxServer +// for forward compatibility +type SandboxServer interface { + // CreateSandbox will be called right after sandbox shim instance launched. + // It is a good place to initialize sandbox environment. + CreateSandbox(context.Context, *CreateSandboxRequest) (*CreateSandboxResponse, error) + // StartSandbox will start previsouly created sandbox. + StartSandbox(context.Context, *StartSandboxRequest) (*StartSandboxResponse, error) + // Platform queries the platform the sandbox is going to run containers on. + // containerd will use this to generate a proper OCI spec. + Platform(context.Context, *PlatformRequest) (*PlatformResponse, error) + // StopSandbox will stop existing sandbox instance + StopSandbox(context.Context, *StopSandboxRequest) (*StopSandboxResponse, error) + // WaitSandbox blocks until sanbox exits. + WaitSandbox(context.Context, *WaitSandboxRequest) (*WaitSandboxResponse, error) + // SandboxStatus will return current status of the running sandbox instance + SandboxStatus(context.Context, *SandboxStatusRequest) (*SandboxStatusResponse, error) + // PingSandbox is a lightweight API call to check whether sandbox alive. + PingSandbox(context.Context, *PingRequest) (*PingResponse, error) + // ShutdownSandbox must shutdown shim instance. + ShutdownSandbox(context.Context, *ShutdownSandboxRequest) (*ShutdownSandboxResponse, error) + mustEmbedUnimplementedSandboxServer() +} + +// UnimplementedSandboxServer must be embedded to have forward compatible implementations. 
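Editorial aside, not part of the vendored file: the comment above describes the embedding pattern a shim author would use, and the generated UnimplementedSandboxServer type follows immediately below. A minimal, hypothetical sketch of such a server is shown here; vmSandboxServer, the socket path, and the main wiring are illustrative assumptions, only PingSandbox is overridden, and RegisterSandboxServer is the helper defined further down in this same file.

package main

import (
	"context"
	"net"

	api "github.com/containerd/containerd/api/runtime/sandbox/v1"
	"google.golang.org/grpc"
)

// vmSandboxServer is a hypothetical shim-side implementation. Embedding
// UnimplementedSandboxServer keeps it forward compatible when new RPCs are
// added; every method not overridden returns codes.Unimplemented.
type vmSandboxServer struct {
	api.UnimplementedSandboxServer
}

// PingSandbox is the only RPC this sketch overrides.
func (s *vmSandboxServer) PingSandbox(ctx context.Context, req *api.PingRequest) (*api.PingResponse, error) {
	return &api.PingResponse{}, nil
}

func main() {
	// Placeholder socket path for the example.
	l, err := net.Listen("unix", "/run/example/sandbox.sock")
	if err != nil {
		panic(err)
	}
	srv := grpc.NewServer()
	api.RegisterSandboxServer(srv, &vmSandboxServer{})
	_ = srv.Serve(l)
}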
+type UnimplementedSandboxServer struct { +} + +func (UnimplementedSandboxServer) CreateSandbox(context.Context, *CreateSandboxRequest) (*CreateSandboxResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateSandbox not implemented") +} +func (UnimplementedSandboxServer) StartSandbox(context.Context, *StartSandboxRequest) (*StartSandboxResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StartSandbox not implemented") +} +func (UnimplementedSandboxServer) Platform(context.Context, *PlatformRequest) (*PlatformResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Platform not implemented") +} +func (UnimplementedSandboxServer) StopSandbox(context.Context, *StopSandboxRequest) (*StopSandboxResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StopSandbox not implemented") +} +func (UnimplementedSandboxServer) WaitSandbox(context.Context, *WaitSandboxRequest) (*WaitSandboxResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method WaitSandbox not implemented") +} +func (UnimplementedSandboxServer) SandboxStatus(context.Context, *SandboxStatusRequest) (*SandboxStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SandboxStatus not implemented") +} +func (UnimplementedSandboxServer) PingSandbox(context.Context, *PingRequest) (*PingResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PingSandbox not implemented") +} +func (UnimplementedSandboxServer) ShutdownSandbox(context.Context, *ShutdownSandboxRequest) (*ShutdownSandboxResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ShutdownSandbox not implemented") +} +func (UnimplementedSandboxServer) mustEmbedUnimplementedSandboxServer() {} + +// UnsafeSandboxServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to SandboxServer will +// result in compilation errors. 
+type UnsafeSandboxServer interface { + mustEmbedUnimplementedSandboxServer() +} + +func RegisterSandboxServer(s grpc.ServiceRegistrar, srv SandboxServer) { + s.RegisterService(&Sandbox_ServiceDesc, srv) +} + +func _Sandbox_CreateSandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateSandboxRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SandboxServer).CreateSandbox(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.runtime.sandbox.v1.Sandbox/CreateSandbox", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SandboxServer).CreateSandbox(ctx, req.(*CreateSandboxRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Sandbox_StartSandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartSandboxRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SandboxServer).StartSandbox(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.runtime.sandbox.v1.Sandbox/StartSandbox", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SandboxServer).StartSandbox(ctx, req.(*StartSandboxRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Sandbox_Platform_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PlatformRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SandboxServer).Platform(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.runtime.sandbox.v1.Sandbox/Platform", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SandboxServer).Platform(ctx, req.(*PlatformRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Sandbox_StopSandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StopSandboxRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SandboxServer).StopSandbox(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.runtime.sandbox.v1.Sandbox/StopSandbox", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SandboxServer).StopSandbox(ctx, req.(*StopSandboxRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Sandbox_WaitSandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WaitSandboxRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SandboxServer).WaitSandbox(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.runtime.sandbox.v1.Sandbox/WaitSandbox", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SandboxServer).WaitSandbox(ctx, req.(*WaitSandboxRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Sandbox_SandboxStatus_Handler(srv 
interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SandboxStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SandboxServer).SandboxStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.runtime.sandbox.v1.Sandbox/SandboxStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SandboxServer).SandboxStatus(ctx, req.(*SandboxStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Sandbox_PingSandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PingRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SandboxServer).PingSandbox(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.runtime.sandbox.v1.Sandbox/PingSandbox", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SandboxServer).PingSandbox(ctx, req.(*PingRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Sandbox_ShutdownSandbox_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ShutdownSandboxRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SandboxServer).ShutdownSandbox(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.runtime.sandbox.v1.Sandbox/ShutdownSandbox", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SandboxServer).ShutdownSandbox(ctx, req.(*ShutdownSandboxRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Sandbox_ServiceDesc is the grpc.ServiceDesc for Sandbox service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Sandbox_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.runtime.sandbox.v1.Sandbox", + HandlerType: (*SandboxServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateSandbox", + Handler: _Sandbox_CreateSandbox_Handler, + }, + { + MethodName: "StartSandbox", + Handler: _Sandbox_StartSandbox_Handler, + }, + { + MethodName: "Platform", + Handler: _Sandbox_Platform_Handler, + }, + { + MethodName: "StopSandbox", + Handler: _Sandbox_StopSandbox_Handler, + }, + { + MethodName: "WaitSandbox", + Handler: _Sandbox_WaitSandbox_Handler, + }, + { + MethodName: "SandboxStatus", + Handler: _Sandbox_SandboxStatus_Handler, + }, + { + MethodName: "PingSandbox", + Handler: _Sandbox_PingSandbox_Handler, + }, + { + MethodName: "ShutdownSandbox", + Handler: _Sandbox_ShutdownSandbox_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox_ttrpc.pb.go b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox_ttrpc.pb.go new file mode 100644 index 0000000000000..d935fe611f13a --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox_ttrpc.pb.go @@ -0,0 +1,156 @@ +// Code generated by protoc-gen-go-ttrpc. DO NOT EDIT. 
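Editorial aside, not part of the generated ttrpc file whose header begins above: what follows is the ttrpc flavour of the same Sandbox API, which is how a shim normally exposes it rather than over gRPC. A rough sketch of the server-side wiring, assuming the usual NewServer/Serve/Close signatures from github.com/containerd/ttrpc, might look like this; TTRPCSandboxService and RegisterTTRPCSandboxService are defined just below.

package sandboxexample

import (
	"context"
	"net"

	api "github.com/containerd/containerd/api/runtime/sandbox/v1"
	"github.com/containerd/ttrpc"
)

// serveSandboxTTRPC registers a TTRPCSandboxService implementation on a ttrpc
// server and serves it on the supplied listener until ctx is done or Serve fails.
func serveSandboxTTRPC(ctx context.Context, l net.Listener, impl api.TTRPCSandboxService) error {
	s, err := ttrpc.NewServer()
	if err != nil {
		return err
	}
	defer s.Close()
	api.RegisterTTRPCSandboxService(s, impl)
	return s.Serve(ctx, l)
}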
+// source: github.com/containerd/containerd/api/runtime/sandbox/v1/sandbox.proto +package sandbox + +import ( + context "context" + ttrpc "github.com/containerd/ttrpc" +) + +type TTRPCSandboxService interface { + CreateSandbox(context.Context, *CreateSandboxRequest) (*CreateSandboxResponse, error) + StartSandbox(context.Context, *StartSandboxRequest) (*StartSandboxResponse, error) + Platform(context.Context, *PlatformRequest) (*PlatformResponse, error) + StopSandbox(context.Context, *StopSandboxRequest) (*StopSandboxResponse, error) + WaitSandbox(context.Context, *WaitSandboxRequest) (*WaitSandboxResponse, error) + SandboxStatus(context.Context, *SandboxStatusRequest) (*SandboxStatusResponse, error) + PingSandbox(context.Context, *PingRequest) (*PingResponse, error) + ShutdownSandbox(context.Context, *ShutdownSandboxRequest) (*ShutdownSandboxResponse, error) +} + +func RegisterTTRPCSandboxService(srv *ttrpc.Server, svc TTRPCSandboxService) { + srv.RegisterService("containerd.runtime.sandbox.v1.Sandbox", &ttrpc.ServiceDesc{ + Methods: map[string]ttrpc.Method{ + "CreateSandbox": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req CreateSandboxRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.CreateSandbox(ctx, &req) + }, + "StartSandbox": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req StartSandboxRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.StartSandbox(ctx, &req) + }, + "Platform": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req PlatformRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Platform(ctx, &req) + }, + "StopSandbox": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req StopSandboxRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.StopSandbox(ctx, &req) + }, + "WaitSandbox": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req WaitSandboxRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.WaitSandbox(ctx, &req) + }, + "SandboxStatus": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req SandboxStatusRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.SandboxStatus(ctx, &req) + }, + "PingSandbox": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req PingRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.PingSandbox(ctx, &req) + }, + "ShutdownSandbox": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req ShutdownSandboxRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.ShutdownSandbox(ctx, &req) + }, + }, + }) +} + +type ttrpcsandboxClient struct { + client *ttrpc.Client +} + +func NewTTRPCSandboxClient(client *ttrpc.Client) TTRPCSandboxService { + return &ttrpcsandboxClient{ + client: client, + } +} + +func (c *ttrpcsandboxClient) CreateSandbox(ctx context.Context, req *CreateSandboxRequest) (*CreateSandboxResponse, error) { + var resp CreateSandboxResponse + if err := c.client.Call(ctx, "containerd.runtime.sandbox.v1.Sandbox", "CreateSandbox", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *ttrpcsandboxClient) StartSandbox(ctx 
context.Context, req *StartSandboxRequest) (*StartSandboxResponse, error) { + var resp StartSandboxResponse + if err := c.client.Call(ctx, "containerd.runtime.sandbox.v1.Sandbox", "StartSandbox", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *ttrpcsandboxClient) Platform(ctx context.Context, req *PlatformRequest) (*PlatformResponse, error) { + var resp PlatformResponse + if err := c.client.Call(ctx, "containerd.runtime.sandbox.v1.Sandbox", "Platform", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *ttrpcsandboxClient) StopSandbox(ctx context.Context, req *StopSandboxRequest) (*StopSandboxResponse, error) { + var resp StopSandboxResponse + if err := c.client.Call(ctx, "containerd.runtime.sandbox.v1.Sandbox", "StopSandbox", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *ttrpcsandboxClient) WaitSandbox(ctx context.Context, req *WaitSandboxRequest) (*WaitSandboxResponse, error) { + var resp WaitSandboxResponse + if err := c.client.Call(ctx, "containerd.runtime.sandbox.v1.Sandbox", "WaitSandbox", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *ttrpcsandboxClient) SandboxStatus(ctx context.Context, req *SandboxStatusRequest) (*SandboxStatusResponse, error) { + var resp SandboxStatusResponse + if err := c.client.Call(ctx, "containerd.runtime.sandbox.v1.Sandbox", "SandboxStatus", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *ttrpcsandboxClient) PingSandbox(ctx context.Context, req *PingRequest) (*PingResponse, error) { + var resp PingResponse + if err := c.client.Call(ctx, "containerd.runtime.sandbox.v1.Sandbox", "PingSandbox", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *ttrpcsandboxClient) ShutdownSandbox(ctx context.Context, req *ShutdownSandboxRequest) (*ShutdownSandboxResponse, error) { + var resp ShutdownSandboxResponse + if err := c.client.Call(ctx, "containerd.runtime.sandbox.v1.Sandbox", "ShutdownSandbox", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} diff --git a/vendor/github.com/containerd/containerd/runtime/v2/task/doc.go b/vendor/github.com/containerd/containerd/api/runtime/task/v2/doc.go similarity index 100% rename from vendor/github.com/containerd/containerd/runtime/v2/task/doc.go rename to vendor/github.com/containerd/containerd/api/runtime/task/v2/doc.go diff --git a/vendor/github.com/containerd/containerd/api/runtime/task/v2/shim.pb.go b/vendor/github.com/containerd/containerd/api/runtime/task/v2/shim.pb.go new file mode 100644 index 0000000000000..383e29db40128 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/runtime/task/v2/shim.pb.go @@ -0,0 +1,2338 @@ +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
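Editorial aside, not part of the vendored files; the header of the generated task API file continues below. On the calling side, containerd reaches the ttrpc Sandbox service above through NewTTRPCSandboxClient. A hedged sketch follows, with a placeholder socket path and with SandboxID assumed to be the generated Go field name for the sandbox_id proto field (matching containerd's acronym post-processing seen elsewhere in these files).

package sandboxexample

import (
	"context"
	"fmt"
	"net"

	api "github.com/containerd/containerd/api/runtime/sandbox/v1"
	"github.com/containerd/ttrpc"
)

// pingSandbox dials the shim's ttrpc socket and issues a single PingSandbox call.
func pingSandbox(ctx context.Context, socket, sandboxID string) error {
	conn, err := net.Dial("unix", socket)
	if err != nil {
		return err
	}
	client := ttrpc.NewClient(conn)
	defer client.Close()

	svc := api.NewTTRPCSandboxClient(client)
	// SandboxID is assumed to be the field name in the generated PingRequest.
	if _, err := svc.PingSandbox(ctx, &api.PingRequest{SandboxID: sandboxID}); err != nil {
		return fmt.Errorf("ping sandbox %s: %w", sandboxID, err)
	}
	return nil
}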
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 +// source: github.com/containerd/containerd/api/runtime/task/v2/shim.proto + +package task + +import ( + types "github.com/containerd/containerd/api/types" + task "github.com/containerd/containerd/api/types/task" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + emptypb "google.golang.org/protobuf/types/known/emptypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CreateTaskRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Bundle string `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"` + Rootfs []*types.Mount `protobuf:"bytes,3,rep,name=rootfs,proto3" json:"rootfs,omitempty"` + Terminal bool `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"` + Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"` + Checkpoint string `protobuf:"bytes,8,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"` + ParentCheckpoint string `protobuf:"bytes,9,opt,name=parent_checkpoint,json=parentCheckpoint,proto3" json:"parent_checkpoint,omitempty"` + Options *anypb.Any `protobuf:"bytes,10,opt,name=options,proto3" json:"options,omitempty"` +} + +func (x *CreateTaskRequest) Reset() { + *x = CreateTaskRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateTaskRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateTaskRequest) ProtoMessage() {} + +func (x *CreateTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateTaskRequest.ProtoReflect.Descriptor instead. 
+func (*CreateTaskRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{0} +} + +func (x *CreateTaskRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *CreateTaskRequest) GetBundle() string { + if x != nil { + return x.Bundle + } + return "" +} + +func (x *CreateTaskRequest) GetRootfs() []*types.Mount { + if x != nil { + return x.Rootfs + } + return nil +} + +func (x *CreateTaskRequest) GetTerminal() bool { + if x != nil { + return x.Terminal + } + return false +} + +func (x *CreateTaskRequest) GetStdin() string { + if x != nil { + return x.Stdin + } + return "" +} + +func (x *CreateTaskRequest) GetStdout() string { + if x != nil { + return x.Stdout + } + return "" +} + +func (x *CreateTaskRequest) GetStderr() string { + if x != nil { + return x.Stderr + } + return "" +} + +func (x *CreateTaskRequest) GetCheckpoint() string { + if x != nil { + return x.Checkpoint + } + return "" +} + +func (x *CreateTaskRequest) GetParentCheckpoint() string { + if x != nil { + return x.ParentCheckpoint + } + return "" +} + +func (x *CreateTaskRequest) GetOptions() *anypb.Any { + if x != nil { + return x.Options + } + return nil +} + +type CreateTaskResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` +} + +func (x *CreateTaskResponse) Reset() { + *x = CreateTaskResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateTaskResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateTaskResponse) ProtoMessage() {} + +func (x *CreateTaskResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateTaskResponse.ProtoReflect.Descriptor instead. 
+func (*CreateTaskResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{1} +} + +func (x *CreateTaskResponse) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} + +type DeleteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` +} + +func (x *DeleteRequest) Reset() { + *x = DeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteRequest) ProtoMessage() {} + +func (x *DeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteRequest.ProtoReflect.Descriptor instead. +func (*DeleteRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{2} +} + +func (x *DeleteRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *DeleteRequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} + +type DeleteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` + ExitStatus uint32 `protobuf:"varint,2,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=exited_at,json=exitedAt,proto3" json:"exited_at,omitempty"` +} + +func (x *DeleteResponse) Reset() { + *x = DeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteResponse) ProtoMessage() {} + +func (x *DeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteResponse.ProtoReflect.Descriptor instead. 
+func (*DeleteResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{3} +} + +func (x *DeleteResponse) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} + +func (x *DeleteResponse) GetExitStatus() uint32 { + if x != nil { + return x.ExitStatus + } + return 0 +} + +func (x *DeleteResponse) GetExitedAt() *timestamppb.Timestamp { + if x != nil { + return x.ExitedAt + } + return nil +} + +type ExecProcessRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + Terminal bool `protobuf:"varint,3,opt,name=terminal,proto3" json:"terminal,omitempty"` + Stdin string `protobuf:"bytes,4,opt,name=stdin,proto3" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,5,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,6,opt,name=stderr,proto3" json:"stderr,omitempty"` + Spec *anypb.Any `protobuf:"bytes,7,opt,name=spec,proto3" json:"spec,omitempty"` +} + +func (x *ExecProcessRequest) Reset() { + *x = ExecProcessRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExecProcessRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExecProcessRequest) ProtoMessage() {} + +func (x *ExecProcessRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExecProcessRequest.ProtoReflect.Descriptor instead. 
+func (*ExecProcessRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{4} +} + +func (x *ExecProcessRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *ExecProcessRequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} + +func (x *ExecProcessRequest) GetTerminal() bool { + if x != nil { + return x.Terminal + } + return false +} + +func (x *ExecProcessRequest) GetStdin() string { + if x != nil { + return x.Stdin + } + return "" +} + +func (x *ExecProcessRequest) GetStdout() string { + if x != nil { + return x.Stdout + } + return "" +} + +func (x *ExecProcessRequest) GetStderr() string { + if x != nil { + return x.Stderr + } + return "" +} + +func (x *ExecProcessRequest) GetSpec() *anypb.Any { + if x != nil { + return x.Spec + } + return nil +} + +type ExecProcessResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ExecProcessResponse) Reset() { + *x = ExecProcessResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExecProcessResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExecProcessResponse) ProtoMessage() {} + +func (x *ExecProcessResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExecProcessResponse.ProtoReflect.Descriptor instead. +func (*ExecProcessResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{5} +} + +type ResizePtyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + Width uint32 `protobuf:"varint,3,opt,name=width,proto3" json:"width,omitempty"` + Height uint32 `protobuf:"varint,4,opt,name=height,proto3" json:"height,omitempty"` +} + +func (x *ResizePtyRequest) Reset() { + *x = ResizePtyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResizePtyRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResizePtyRequest) ProtoMessage() {} + +func (x *ResizePtyRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResizePtyRequest.ProtoReflect.Descriptor instead. 
+func (*ResizePtyRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{6} +} + +func (x *ResizePtyRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *ResizePtyRequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} + +func (x *ResizePtyRequest) GetWidth() uint32 { + if x != nil { + return x.Width + } + return 0 +} + +func (x *ResizePtyRequest) GetHeight() uint32 { + if x != nil { + return x.Height + } + return 0 +} + +type StateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` +} + +func (x *StateRequest) Reset() { + *x = StateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StateRequest) ProtoMessage() {} + +func (x *StateRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StateRequest.ProtoReflect.Descriptor instead. +func (*StateRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{7} +} + +func (x *StateRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *StateRequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} + +type StateResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Bundle string `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"` + Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` + Status task.Status `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"` + Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"` + Terminal bool `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"` + ExitStatus uint32 `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt *timestamppb.Timestamp `protobuf:"bytes,10,opt,name=exited_at,json=exitedAt,proto3" json:"exited_at,omitempty"` + ExecID string `protobuf:"bytes,11,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` +} + +func (x *StateResponse) Reset() { + *x = StateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StateResponse) String() string { + return 
protoimpl.X.MessageStringOf(x) +} + +func (*StateResponse) ProtoMessage() {} + +func (x *StateResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StateResponse.ProtoReflect.Descriptor instead. +func (*StateResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{8} +} + +func (x *StateResponse) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *StateResponse) GetBundle() string { + if x != nil { + return x.Bundle + } + return "" +} + +func (x *StateResponse) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} + +func (x *StateResponse) GetStatus() task.Status { + if x != nil { + return x.Status + } + return task.Status(0) +} + +func (x *StateResponse) GetStdin() string { + if x != nil { + return x.Stdin + } + return "" +} + +func (x *StateResponse) GetStdout() string { + if x != nil { + return x.Stdout + } + return "" +} + +func (x *StateResponse) GetStderr() string { + if x != nil { + return x.Stderr + } + return "" +} + +func (x *StateResponse) GetTerminal() bool { + if x != nil { + return x.Terminal + } + return false +} + +func (x *StateResponse) GetExitStatus() uint32 { + if x != nil { + return x.ExitStatus + } + return 0 +} + +func (x *StateResponse) GetExitedAt() *timestamppb.Timestamp { + if x != nil { + return x.ExitedAt + } + return nil +} + +func (x *StateResponse) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} + +type KillRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + Signal uint32 `protobuf:"varint,3,opt,name=signal,proto3" json:"signal,omitempty"` + All bool `protobuf:"varint,4,opt,name=all,proto3" json:"all,omitempty"` +} + +func (x *KillRequest) Reset() { + *x = KillRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KillRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KillRequest) ProtoMessage() {} + +func (x *KillRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KillRequest.ProtoReflect.Descriptor instead. 
+func (*KillRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{9} +} + +func (x *KillRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *KillRequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} + +func (x *KillRequest) GetSignal() uint32 { + if x != nil { + return x.Signal + } + return 0 +} + +func (x *KillRequest) GetAll() bool { + if x != nil { + return x.All + } + return false +} + +type CloseIORequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + Stdin bool `protobuf:"varint,3,opt,name=stdin,proto3" json:"stdin,omitempty"` +} + +func (x *CloseIORequest) Reset() { + *x = CloseIORequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CloseIORequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CloseIORequest) ProtoMessage() {} + +func (x *CloseIORequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CloseIORequest.ProtoReflect.Descriptor instead. +func (*CloseIORequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{10} +} + +func (x *CloseIORequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *CloseIORequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} + +func (x *CloseIORequest) GetStdin() bool { + if x != nil { + return x.Stdin + } + return false +} + +type PidsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *PidsRequest) Reset() { + *x = PidsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PidsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PidsRequest) ProtoMessage() {} + +func (x *PidsRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PidsRequest.ProtoReflect.Descriptor instead. 
+func (*PidsRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{11} +} + +func (x *PidsRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +type PidsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Processes []*task.ProcessInfo `protobuf:"bytes,1,rep,name=processes,proto3" json:"processes,omitempty"` +} + +func (x *PidsResponse) Reset() { + *x = PidsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PidsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PidsResponse) ProtoMessage() {} + +func (x *PidsResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PidsResponse.ProtoReflect.Descriptor instead. +func (*PidsResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{12} +} + +func (x *PidsResponse) GetProcesses() []*task.ProcessInfo { + if x != nil { + return x.Processes + } + return nil +} + +type CheckpointTaskRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + Options *anypb.Any `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"` +} + +func (x *CheckpointTaskRequest) Reset() { + *x = CheckpointTaskRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CheckpointTaskRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CheckpointTaskRequest) ProtoMessage() {} + +func (x *CheckpointTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CheckpointTaskRequest.ProtoReflect.Descriptor instead. 
+func (*CheckpointTaskRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{13} +} + +func (x *CheckpointTaskRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *CheckpointTaskRequest) GetPath() string { + if x != nil { + return x.Path + } + return "" +} + +func (x *CheckpointTaskRequest) GetOptions() *anypb.Any { + if x != nil { + return x.Options + } + return nil +} + +type UpdateTaskRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Resources *anypb.Any `protobuf:"bytes,2,opt,name=resources,proto3" json:"resources,omitempty"` + Annotations map[string]string `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *UpdateTaskRequest) Reset() { + *x = UpdateTaskRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateTaskRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateTaskRequest) ProtoMessage() {} + +func (x *UpdateTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateTaskRequest.ProtoReflect.Descriptor instead. +func (*UpdateTaskRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{14} +} + +func (x *UpdateTaskRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *UpdateTaskRequest) GetResources() *anypb.Any { + if x != nil { + return x.Resources + } + return nil +} + +func (x *UpdateTaskRequest) GetAnnotations() map[string]string { + if x != nil { + return x.Annotations + } + return nil +} + +type StartRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` +} + +func (x *StartRequest) Reset() { + *x = StartRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StartRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartRequest) ProtoMessage() {} + +func (x *StartRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartRequest.ProtoReflect.Descriptor instead. 
+func (*StartRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{15} +} + +func (x *StartRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *StartRequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} + +type StartResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` +} + +func (x *StartResponse) Reset() { + *x = StartResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StartResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StartResponse) ProtoMessage() {} + +func (x *StartResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StartResponse.ProtoReflect.Descriptor instead. +func (*StartResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{16} +} + +func (x *StartResponse) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} + +type WaitRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` +} + +func (x *WaitRequest) Reset() { + *x = WaitRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WaitRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WaitRequest) ProtoMessage() {} + +func (x *WaitRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WaitRequest.ProtoReflect.Descriptor instead. 
+func (*WaitRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{17} +} + +func (x *WaitRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *WaitRequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} + +type WaitResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ExitStatus uint32 `protobuf:"varint,1,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=exited_at,json=exitedAt,proto3" json:"exited_at,omitempty"` +} + +func (x *WaitResponse) Reset() { + *x = WaitResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WaitResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WaitResponse) ProtoMessage() {} + +func (x *WaitResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WaitResponse.ProtoReflect.Descriptor instead. +func (*WaitResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{18} +} + +func (x *WaitResponse) GetExitStatus() uint32 { + if x != nil { + return x.ExitStatus + } + return 0 +} + +func (x *WaitResponse) GetExitedAt() *timestamppb.Timestamp { + if x != nil { + return x.ExitedAt + } + return nil +} + +type StatsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *StatsRequest) Reset() { + *x = StatsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatsRequest) ProtoMessage() {} + +func (x *StatsRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatsRequest.ProtoReflect.Descriptor instead. 
+func (*StatsRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{19} +} + +func (x *StatsRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +type StatsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Stats *anypb.Any `protobuf:"bytes,1,opt,name=stats,proto3" json:"stats,omitempty"` +} + +func (x *StatsResponse) Reset() { + *x = StatsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StatsResponse) ProtoMessage() {} + +func (x *StatsResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StatsResponse.ProtoReflect.Descriptor instead. +func (*StatsResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{20} +} + +func (x *StatsResponse) GetStats() *anypb.Any { + if x != nil { + return x.Stats + } + return nil +} + +type ConnectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *ConnectRequest) Reset() { + *x = ConnectRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConnectRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectRequest) ProtoMessage() {} + +func (x *ConnectRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConnectRequest.ProtoReflect.Descriptor instead. 
+func (*ConnectRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{21} +} + +func (x *ConnectRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +type ConnectResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ShimPid uint32 `protobuf:"varint,1,opt,name=shim_pid,json=shimPid,proto3" json:"shim_pid,omitempty"` + TaskPid uint32 `protobuf:"varint,2,opt,name=task_pid,json=taskPid,proto3" json:"task_pid,omitempty"` + Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *ConnectResponse) Reset() { + *x = ConnectResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConnectResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConnectResponse) ProtoMessage() {} + +func (x *ConnectResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConnectResponse.ProtoReflect.Descriptor instead. +func (*ConnectResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{22} +} + +func (x *ConnectResponse) GetShimPid() uint32 { + if x != nil { + return x.ShimPid + } + return 0 +} + +func (x *ConnectResponse) GetTaskPid() uint32 { + if x != nil { + return x.TaskPid + } + return 0 +} + +func (x *ConnectResponse) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +type ShutdownRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Now bool `protobuf:"varint,2,opt,name=now,proto3" json:"now,omitempty"` +} + +func (x *ShutdownRequest) Reset() { + *x = ShutdownRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ShutdownRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ShutdownRequest) ProtoMessage() {} + +func (x *ShutdownRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ShutdownRequest.ProtoReflect.Descriptor instead. 
+func (*ShutdownRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{23} +} + +func (x *ShutdownRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +func (x *ShutdownRequest) GetNow() bool { + if x != nil { + return x.Now + } + return false +} + +type PauseRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *PauseRequest) Reset() { + *x = PauseRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PauseRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PauseRequest) ProtoMessage() {} + +func (x *PauseRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PauseRequest.ProtoReflect.Descriptor instead. +func (*PauseRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{24} +} + +func (x *PauseRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +type ResumeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *ResumeRequest) Reset() { + *x = ResumeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResumeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResumeRequest) ProtoMessage() {} + +func (x *ResumeRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResumeRequest.ProtoReflect.Descriptor instead. 
+func (*ResumeRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP(), []int{25} +} + +func (x *ResumeRequest) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +var File_github_com_containerd_containerd_api_runtime_task_v2_shim_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDesc = []byte{ + 0x0a, 0x3f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2f, 0x74, + 0x61, 0x73, 0x6b, 0x2f, 0x76, 0x32, 0x2f, 0x73, 0x68, 0x69, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, + 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x36, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6d, 0x6f, 0x75, 0x6e, 0x74, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0xcb, 0x02, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, + 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x6e, 0x64, + 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x6e, 0x64, 0x6c, 0x65, + 0x12, 0x2f, 0x0a, 0x06, 0x72, 0x6f, 0x6f, 0x74, 0x66, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x06, 0x72, 0x6f, 0x6f, 0x74, 0x66, + 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x74, 0x64, 0x69, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, + 0x64, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, + 0x74, 0x64, 0x65, 0x72, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x64, + 0x65, 0x72, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x68, 0x65, 
0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x12, 0x2e, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x22, 0x26, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x22, 0x38, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, + 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, + 0x49, 0x64, 0x22, 0x7c, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x69, 0x74, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x65, 0x78, 0x69, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x09, 0x65, 0x78, 0x69, 0x74, 0x65, + 0x64, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x41, 0x74, + 0x22, 0xc9, 0x01, 0x0a, 0x12, 0x45, 0x78, 0x65, 0x63, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, 0x64, + 0x12, 0x1a, 0x0a, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x12, 0x14, 0x0a, 0x05, + 0x73, 0x74, 0x64, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x64, + 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, + 0x64, 0x65, 0x72, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x64, 0x65, + 0x72, 0x72, 0x12, 0x28, 0x0a, 0x04, 0x73, 0x70, 0x65, 0x63, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x22, 0x15, 0x0a, 0x13, + 0x45, 0x78, 0x65, 0x63, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x69, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x69, 0x7a, 0x65, 0x50, 
0x74, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, 0x64, + 0x12, 0x14, 0x0a, 0x05, 0x77, 0x69, 0x64, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x05, 0x77, 0x69, 0x64, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x37, + 0x0a, 0x0c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x17, + 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, 0x64, 0x22, 0xd3, 0x02, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x6e, + 0x64, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x6e, 0x64, 0x6c, + 0x65, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, + 0x70, 0x69, 0x64, 0x12, 0x33, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x76, 0x31, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x64, 0x69, + 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x64, 0x69, 0x6e, 0x12, 0x16, + 0x0a, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x64, 0x65, 0x72, 0x72, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x64, 0x65, 0x72, 0x72, 0x12, 0x1a, + 0x0a, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, + 0x69, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0a, 0x65, 0x78, 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x09, 0x65, + 0x78, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78, 0x69, 0x74, + 0x65, 0x64, 0x41, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, 0x5f, 0x69, 0x64, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, 0x64, 0x22, 0x60, 0x0a, + 0x0b, 0x4b, 0x69, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x17, 0x0a, 0x07, + 0x65, 0x78, 0x65, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, + 0x78, 0x65, 0x63, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x12, 0x10, 0x0a, + 0x03, 0x61, 
0x6c, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x61, 0x6c, 0x6c, 0x22, + 0x4f, 0x0a, 0x0e, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x49, 0x4f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, + 0x64, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, + 0x64, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x73, 0x74, 0x64, 0x69, 0x6e, + 0x22, 0x1d, 0x0a, 0x0b, 0x50, 0x69, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, + 0x4e, 0x0a, 0x0c, 0x50, 0x69, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x3e, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x76, 0x31, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x73, 0x22, + 0x6b, 0x0a, 0x15, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x54, 0x61, 0x73, + 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x2e, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x41, 0x6e, 0x79, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xf1, 0x01, 0x0a, + 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x32, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x37, 0x0a, 0x0c, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, + 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, 0x64, 0x22, 0x21, 0x0a, 0x0d, 0x53, 0x74, 0x61, + 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x22, 0x36, 0x0a, 0x0b, + 0x57, 0x61, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x65, + 0x78, 0x65, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x78, + 0x65, 0x63, 0x49, 0x64, 0x22, 0x68, 0x0a, 0x0c, 0x57, 0x61, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x69, 0x74, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x65, 0x78, 0x69, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x09, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x5f, + 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x1e, + 0x0a, 0x0c, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x3b, + 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x2a, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x22, 0x20, 0x0a, 0x0e, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x61, 0x0a, + 0x0f, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x19, 0x0a, 0x08, 0x73, 0x68, 0x69, 0x6d, 0x5f, 0x70, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x07, 0x73, 0x68, 0x69, 0x6d, 0x50, 0x69, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x74, + 0x61, 0x73, 0x6b, 0x5f, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x74, + 0x61, 0x73, 0x6b, 0x50, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x22, 0x33, 0x0a, 0x0f, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6e, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x03, 0x6e, 0x6f, 0x77, 0x22, 0x1e, 0x0a, 0x0c, 0x50, 0x61, 0x75, 0x73, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x1f, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x32, 0x8a, 0x0a, 0x0a, 
0x04, 0x54, 0x61, 0x73, 0x6b, 0x12, + 0x4c, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x57, 0x0a, + 0x06, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x25, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, + 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4c, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, + 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, + 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x21, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, + 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4f, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x21, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, + 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, + 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x04, 0x50, 0x69, 0x64, 0x73, 0x12, 0x1f, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, + 0x76, 0x32, 0x2e, 0x50, 0x69, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, + 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x69, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x41, 0x0a, 0x05, 0x50, 0x61, 0x75, 0x73, 0x65, 0x12, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x50, + 0x61, 0x75, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x12, 0x43, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x12, 0x21, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, + 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4f, 0x0a, 0x0a, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 
0x65, 0x63, + 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3f, 0x0a, 0x04, 0x4b, 0x69, 0x6c, + 0x6c, 0x12, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, + 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x4b, 0x69, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x46, 0x0a, 0x04, 0x45, 0x78, + 0x65, 0x63, 0x12, 0x26, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x50, 0x72, 0x6f, 0x63, + 0x65, 0x73, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x12, 0x49, 0x0a, 0x09, 0x52, 0x65, 0x73, 0x69, 0x7a, 0x65, 0x50, 0x74, 0x79, 0x12, + 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, + 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x69, 0x7a, 0x65, 0x50, 0x74, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x45, 0x0a, + 0x07, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x49, 0x4f, 0x12, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, + 0x6f, 0x73, 0x65, 0x49, 0x4f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x12, 0x47, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x25, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, + 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x49, 0x0a, + 0x04, 0x57, 0x61, 0x69, 0x74, 0x12, 0x1f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x61, 0x69, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x61, 0x69, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4c, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, + 0x73, 0x12, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, + 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x12, 0x22, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, + 0x61, 0x73, 
0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x08, 0x53, 0x68, + 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x12, 0x23, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x68, 0x75, 0x74, + 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x42, 0x3b, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x75, 0x6e, 0x74, + 0x69, 0x6d, 0x65, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x2f, 0x76, 0x32, 0x3b, 0x74, 0x61, 0x73, 0x6b, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescData = file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDesc +) + +func file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes = make([]protoimpl.MessageInfo, 27) +var file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_goTypes = []interface{}{ + (*CreateTaskRequest)(nil), // 0: containerd.task.v2.CreateTaskRequest + (*CreateTaskResponse)(nil), // 1: containerd.task.v2.CreateTaskResponse + (*DeleteRequest)(nil), // 2: containerd.task.v2.DeleteRequest + (*DeleteResponse)(nil), // 3: containerd.task.v2.DeleteResponse + (*ExecProcessRequest)(nil), // 4: containerd.task.v2.ExecProcessRequest + (*ExecProcessResponse)(nil), // 5: containerd.task.v2.ExecProcessResponse + (*ResizePtyRequest)(nil), // 6: containerd.task.v2.ResizePtyRequest + (*StateRequest)(nil), // 7: containerd.task.v2.StateRequest + (*StateResponse)(nil), // 8: containerd.task.v2.StateResponse + (*KillRequest)(nil), // 9: containerd.task.v2.KillRequest + (*CloseIORequest)(nil), // 10: containerd.task.v2.CloseIORequest + (*PidsRequest)(nil), // 11: containerd.task.v2.PidsRequest + (*PidsResponse)(nil), // 12: containerd.task.v2.PidsResponse + (*CheckpointTaskRequest)(nil), // 13: containerd.task.v2.CheckpointTaskRequest + (*UpdateTaskRequest)(nil), // 14: containerd.task.v2.UpdateTaskRequest + (*StartRequest)(nil), // 15: containerd.task.v2.StartRequest + (*StartResponse)(nil), // 16: containerd.task.v2.StartResponse + (*WaitRequest)(nil), // 17: containerd.task.v2.WaitRequest + (*WaitResponse)(nil), // 18: containerd.task.v2.WaitResponse + (*StatsRequest)(nil), // 19: containerd.task.v2.StatsRequest + (*StatsResponse)(nil), 
// 20: containerd.task.v2.StatsResponse + (*ConnectRequest)(nil), // 21: containerd.task.v2.ConnectRequest + (*ConnectResponse)(nil), // 22: containerd.task.v2.ConnectResponse + (*ShutdownRequest)(nil), // 23: containerd.task.v2.ShutdownRequest + (*PauseRequest)(nil), // 24: containerd.task.v2.PauseRequest + (*ResumeRequest)(nil), // 25: containerd.task.v2.ResumeRequest + nil, // 26: containerd.task.v2.UpdateTaskRequest.AnnotationsEntry + (*types.Mount)(nil), // 27: containerd.types.Mount + (*anypb.Any)(nil), // 28: google.protobuf.Any + (*timestamppb.Timestamp)(nil), // 29: google.protobuf.Timestamp + (task.Status)(0), // 30: containerd.v1.types.Status + (*task.ProcessInfo)(nil), // 31: containerd.v1.types.ProcessInfo + (*emptypb.Empty)(nil), // 32: google.protobuf.Empty +} +var file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_depIdxs = []int32{ + 27, // 0: containerd.task.v2.CreateTaskRequest.rootfs:type_name -> containerd.types.Mount + 28, // 1: containerd.task.v2.CreateTaskRequest.options:type_name -> google.protobuf.Any + 29, // 2: containerd.task.v2.DeleteResponse.exited_at:type_name -> google.protobuf.Timestamp + 28, // 3: containerd.task.v2.ExecProcessRequest.spec:type_name -> google.protobuf.Any + 30, // 4: containerd.task.v2.StateResponse.status:type_name -> containerd.v1.types.Status + 29, // 5: containerd.task.v2.StateResponse.exited_at:type_name -> google.protobuf.Timestamp + 31, // 6: containerd.task.v2.PidsResponse.processes:type_name -> containerd.v1.types.ProcessInfo + 28, // 7: containerd.task.v2.CheckpointTaskRequest.options:type_name -> google.protobuf.Any + 28, // 8: containerd.task.v2.UpdateTaskRequest.resources:type_name -> google.protobuf.Any + 26, // 9: containerd.task.v2.UpdateTaskRequest.annotations:type_name -> containerd.task.v2.UpdateTaskRequest.AnnotationsEntry + 29, // 10: containerd.task.v2.WaitResponse.exited_at:type_name -> google.protobuf.Timestamp + 28, // 11: containerd.task.v2.StatsResponse.stats:type_name -> google.protobuf.Any + 7, // 12: containerd.task.v2.Task.State:input_type -> containerd.task.v2.StateRequest + 0, // 13: containerd.task.v2.Task.Create:input_type -> containerd.task.v2.CreateTaskRequest + 15, // 14: containerd.task.v2.Task.Start:input_type -> containerd.task.v2.StartRequest + 2, // 15: containerd.task.v2.Task.Delete:input_type -> containerd.task.v2.DeleteRequest + 11, // 16: containerd.task.v2.Task.Pids:input_type -> containerd.task.v2.PidsRequest + 24, // 17: containerd.task.v2.Task.Pause:input_type -> containerd.task.v2.PauseRequest + 25, // 18: containerd.task.v2.Task.Resume:input_type -> containerd.task.v2.ResumeRequest + 13, // 19: containerd.task.v2.Task.Checkpoint:input_type -> containerd.task.v2.CheckpointTaskRequest + 9, // 20: containerd.task.v2.Task.Kill:input_type -> containerd.task.v2.KillRequest + 4, // 21: containerd.task.v2.Task.Exec:input_type -> containerd.task.v2.ExecProcessRequest + 6, // 22: containerd.task.v2.Task.ResizePty:input_type -> containerd.task.v2.ResizePtyRequest + 10, // 23: containerd.task.v2.Task.CloseIO:input_type -> containerd.task.v2.CloseIORequest + 14, // 24: containerd.task.v2.Task.Update:input_type -> containerd.task.v2.UpdateTaskRequest + 17, // 25: containerd.task.v2.Task.Wait:input_type -> containerd.task.v2.WaitRequest + 19, // 26: containerd.task.v2.Task.Stats:input_type -> containerd.task.v2.StatsRequest + 21, // 27: containerd.task.v2.Task.Connect:input_type -> containerd.task.v2.ConnectRequest + 23, // 28: containerd.task.v2.Task.Shutdown:input_type -> 
containerd.task.v2.ShutdownRequest + 8, // 29: containerd.task.v2.Task.State:output_type -> containerd.task.v2.StateResponse + 1, // 30: containerd.task.v2.Task.Create:output_type -> containerd.task.v2.CreateTaskResponse + 16, // 31: containerd.task.v2.Task.Start:output_type -> containerd.task.v2.StartResponse + 3, // 32: containerd.task.v2.Task.Delete:output_type -> containerd.task.v2.DeleteResponse + 12, // 33: containerd.task.v2.Task.Pids:output_type -> containerd.task.v2.PidsResponse + 32, // 34: containerd.task.v2.Task.Pause:output_type -> google.protobuf.Empty + 32, // 35: containerd.task.v2.Task.Resume:output_type -> google.protobuf.Empty + 32, // 36: containerd.task.v2.Task.Checkpoint:output_type -> google.protobuf.Empty + 32, // 37: containerd.task.v2.Task.Kill:output_type -> google.protobuf.Empty + 32, // 38: containerd.task.v2.Task.Exec:output_type -> google.protobuf.Empty + 32, // 39: containerd.task.v2.Task.ResizePty:output_type -> google.protobuf.Empty + 32, // 40: containerd.task.v2.Task.CloseIO:output_type -> google.protobuf.Empty + 32, // 41: containerd.task.v2.Task.Update:output_type -> google.protobuf.Empty + 18, // 42: containerd.task.v2.Task.Wait:output_type -> containerd.task.v2.WaitResponse + 20, // 43: containerd.task.v2.Task.Stats:output_type -> containerd.task.v2.StatsResponse + 22, // 44: containerd.task.v2.Task.Connect:output_type -> containerd.task.v2.ConnectResponse + 32, // 45: containerd.task.v2.Task.Shutdown:output_type -> google.protobuf.Empty + 29, // [29:46] is the sub-list for method output_type + 12, // [12:29] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_init() } +func file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_init() { + if File_github_com_containerd_containerd_api_runtime_task_v2_shim_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateTaskRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateTaskResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecProcessRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + 
return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecProcessResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResizePtyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KillRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CloseIORequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PidsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PidsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckpointTaskRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateTaskRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StartRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*StartResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WaitRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WaitResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConnectRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConnectResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ShutdownRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PauseRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResumeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDesc, + NumEnums: 0, + NumMessages: 27, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_runtime_task_v2_shim_proto = out.File + 
file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_rawDesc = nil + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_goTypes = nil + file_github_com_containerd_containerd_api_runtime_task_v2_shim_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/runtime/v2/task/shim.proto b/vendor/github.com/containerd/containerd/api/runtime/task/v2/shim.proto similarity index 91% rename from vendor/github.com/containerd/containerd/runtime/v2/task/shim.proto rename to vendor/github.com/containerd/containerd/api/runtime/task/v2/shim.proto index df77d578261cd..aad1bd66fdf71 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/task/shim.proto +++ b/vendor/github.com/containerd/containerd/api/runtime/task/v2/shim.proto @@ -20,12 +20,11 @@ package containerd.task.v2; import "google/protobuf/any.proto"; import "google/protobuf/empty.proto"; -import weak "gogoproto/gogo.proto"; import "google/protobuf/timestamp.proto"; import "github.com/containerd/containerd/api/types/mount.proto"; import "github.com/containerd/containerd/api/types/task/task.proto"; -option go_package = "github.com/containerd/containerd/runtime/v2/task;task"; +option go_package = "github.com/containerd/containerd/api/runtime/task/v2;task"; // Shim service is launched for each container and is responsible for owning the IO // for the container and its additional processes. The shim is also the parent of @@ -76,7 +75,7 @@ message DeleteRequest { message DeleteResponse { uint32 pid = 1; uint32 exit_status = 2; - google.protobuf.Timestamp exited_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp exited_at = 3; } message ExecProcessRequest { @@ -114,7 +113,7 @@ message StateResponse { string stderr = 7; bool terminal = 8; uint32 exit_status = 9; - google.protobuf.Timestamp exited_at = 10 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp exited_at = 10; string exec_id = 11; } @@ -167,7 +166,7 @@ message WaitRequest { message WaitResponse { uint32 exit_status = 1; - google.protobuf.Timestamp exited_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp exited_at = 2; } message StatsRequest { diff --git a/vendor/github.com/containerd/containerd/api/runtime/task/v2/shim_ttrpc.pb.go b/vendor/github.com/containerd/containerd/api/runtime/task/v2/shim_ttrpc.pb.go new file mode 100644 index 0000000000000..1210371c2c95b --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/runtime/task/v2/shim_ttrpc.pb.go @@ -0,0 +1,301 @@ +// Code generated by protoc-gen-go-ttrpc. DO NOT EDIT. 
+// source: github.com/containerd/containerd/api/runtime/task/v2/shim.proto +package task + +import ( + context "context" + ttrpc "github.com/containerd/ttrpc" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +type TaskService interface { + State(context.Context, *StateRequest) (*StateResponse, error) + Create(context.Context, *CreateTaskRequest) (*CreateTaskResponse, error) + Start(context.Context, *StartRequest) (*StartResponse, error) + Delete(context.Context, *DeleteRequest) (*DeleteResponse, error) + Pids(context.Context, *PidsRequest) (*PidsResponse, error) + Pause(context.Context, *PauseRequest) (*emptypb.Empty, error) + Resume(context.Context, *ResumeRequest) (*emptypb.Empty, error) + Checkpoint(context.Context, *CheckpointTaskRequest) (*emptypb.Empty, error) + Kill(context.Context, *KillRequest) (*emptypb.Empty, error) + Exec(context.Context, *ExecProcessRequest) (*emptypb.Empty, error) + ResizePty(context.Context, *ResizePtyRequest) (*emptypb.Empty, error) + CloseIO(context.Context, *CloseIORequest) (*emptypb.Empty, error) + Update(context.Context, *UpdateTaskRequest) (*emptypb.Empty, error) + Wait(context.Context, *WaitRequest) (*WaitResponse, error) + Stats(context.Context, *StatsRequest) (*StatsResponse, error) + Connect(context.Context, *ConnectRequest) (*ConnectResponse, error) + Shutdown(context.Context, *ShutdownRequest) (*emptypb.Empty, error) +} + +func RegisterTaskService(srv *ttrpc.Server, svc TaskService) { + srv.RegisterService("containerd.task.v2.Task", &ttrpc.ServiceDesc{ + Methods: map[string]ttrpc.Method{ + "State": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req StateRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.State(ctx, &req) + }, + "Create": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req CreateTaskRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Create(ctx, &req) + }, + "Start": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req StartRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Start(ctx, &req) + }, + "Delete": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req DeleteRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Delete(ctx, &req) + }, + "Pids": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req PidsRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Pids(ctx, &req) + }, + "Pause": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req PauseRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Pause(ctx, &req) + }, + "Resume": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req ResumeRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Resume(ctx, &req) + }, + "Checkpoint": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req CheckpointTaskRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Checkpoint(ctx, &req) + }, + "Kill": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req KillRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Kill(ctx, &req) + }, + 
"Exec": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req ExecProcessRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Exec(ctx, &req) + }, + "ResizePty": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req ResizePtyRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.ResizePty(ctx, &req) + }, + "CloseIO": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req CloseIORequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.CloseIO(ctx, &req) + }, + "Update": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req UpdateTaskRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Update(ctx, &req) + }, + "Wait": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req WaitRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Wait(ctx, &req) + }, + "Stats": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req StatsRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Stats(ctx, &req) + }, + "Connect": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req ConnectRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Connect(ctx, &req) + }, + "Shutdown": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req ShutdownRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Shutdown(ctx, &req) + }, + }, + }) +} + +type taskClient struct { + client *ttrpc.Client +} + +func NewTaskClient(client *ttrpc.Client) TaskService { + return &taskClient{ + client: client, + } +} + +func (c *taskClient) State(ctx context.Context, req *StateRequest) (*StateResponse, error) { + var resp StateResponse + if err := c.client.Call(ctx, "containerd.task.v2.Task", "State", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Create(ctx context.Context, req *CreateTaskRequest) (*CreateTaskResponse, error) { + var resp CreateTaskResponse + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Create", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Start(ctx context.Context, req *StartRequest) (*StartResponse, error) { + var resp StartResponse + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Start", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Delete(ctx context.Context, req *DeleteRequest) (*DeleteResponse, error) { + var resp DeleteResponse + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Delete", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Pids(ctx context.Context, req *PidsRequest) (*PidsResponse, error) { + var resp PidsResponse + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Pids", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Pause(ctx context.Context, req *PauseRequest) (*emptypb.Empty, error) { + var resp emptypb.Empty + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Pause", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Resume(ctx 
context.Context, req *ResumeRequest) (*emptypb.Empty, error) { + var resp emptypb.Empty + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Resume", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Checkpoint(ctx context.Context, req *CheckpointTaskRequest) (*emptypb.Empty, error) { + var resp emptypb.Empty + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Checkpoint", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Kill(ctx context.Context, req *KillRequest) (*emptypb.Empty, error) { + var resp emptypb.Empty + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Kill", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Exec(ctx context.Context, req *ExecProcessRequest) (*emptypb.Empty, error) { + var resp emptypb.Empty + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Exec", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) ResizePty(ctx context.Context, req *ResizePtyRequest) (*emptypb.Empty, error) { + var resp emptypb.Empty + if err := c.client.Call(ctx, "containerd.task.v2.Task", "ResizePty", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) CloseIO(ctx context.Context, req *CloseIORequest) (*emptypb.Empty, error) { + var resp emptypb.Empty + if err := c.client.Call(ctx, "containerd.task.v2.Task", "CloseIO", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Update(ctx context.Context, req *UpdateTaskRequest) (*emptypb.Empty, error) { + var resp emptypb.Empty + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Update", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Wait(ctx context.Context, req *WaitRequest) (*WaitResponse, error) { + var resp WaitResponse + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Wait", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Stats(ctx context.Context, req *StatsRequest) (*StatsResponse, error) { + var resp StatsResponse + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Stats", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Connect(ctx context.Context, req *ConnectRequest) (*ConnectResponse, error) { + var resp ConnectResponse + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Connect", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +func (c *taskClient) Shutdown(ctx context.Context, req *ShutdownRequest) (*emptypb.Empty, error) { + var resp emptypb.Empty + if err := c.client.Call(ctx, "containerd.task.v2.Task", "Shutdown", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} diff --git a/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go index af56c7de2badb..aab9e45b12641 100644 --- a/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go @@ -1,39 +1,49 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. 
+//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/services/containers/v1/containers.proto package containers import ( - context "context" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types "github.com/gogo/protobuf/types" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - strings "strings" - time "time" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type Container struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // ID is the user-specified identifier. // // This field may not be updated. @@ -53,7 +63,7 @@ type Container struct { // Runtime specifies which runtime to use for executing this container. Runtime *Container_Runtime `protobuf:"bytes,4,opt,name=runtime,proto3" json:"runtime,omitempty"` // Spec to be used when creating the container. This is runtime specific. - Spec *types.Any `protobuf:"bytes,5,opt,name=spec,proto3" json:"spec,omitempty"` + Spec *anypb.Any `protobuf:"bytes,5,opt,name=spec,proto3" json:"spec,omitempty"` // Snapshotter specifies the snapshotter name used for rootfs Snapshotter string `protobuf:"bytes,6,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` // SnapshotKey specifies the snapshot key to use for the container's root @@ -68,9 +78,9 @@ type Container struct { // This field may be updated. SnapshotKey string `protobuf:"bytes,7,opt,name=snapshot_key,json=snapshotKey,proto3" json:"snapshot_key,omitempty"` // CreatedAt is the time the container was first created. 
- CreatedAt time.Time `protobuf:"bytes,8,opt,name=created_at,json=createdAt,proto3,stdtime" json:"created_at"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` // UpdatedAt is the last time the container was mutated. - UpdatedAt time.Time `protobuf:"bytes,9,opt,name=updated_at,json=updatedAt,proto3,stdtime" json:"updated_at"` + UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` // Extensions allow clients to provide zero or more blobs that are directly // associated with the container. One may provide protobuf, json, or other // encoding formats. The primary use of this is to further decorate the @@ -80,165 +90,219 @@ type Container struct { // that should be unique against other extensions. When updating extension // data, one should only update the specified extension using field paths // to select a specific map key. - Extensions map[string]types.Any `protobuf:"bytes,10,rep,name=extensions,proto3" json:"extensions" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Extensions map[string]*anypb.Any `protobuf:"bytes,10,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Sandbox ID this container belongs to. + Sandbox string `protobuf:"bytes,11,opt,name=sandbox,proto3" json:"sandbox,omitempty"` +} + +func (x *Container) Reset() { + *x = Container{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Container) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Container) Reset() { *m = Container{} } func (*Container) ProtoMessage() {} -func (*Container) Descriptor() ([]byte, []int) { - return fileDescriptor_311afb8e15951042, []int{0} -} -func (m *Container) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Container) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Container.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *Container) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *Container) XXX_Merge(src proto.Message) { - xxx_messageInfo_Container.Merge(m, src) + +// Deprecated: Use Container.ProtoReflect.Descriptor instead. 
+func (*Container) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{0} } -func (m *Container) XXX_Size() int { - return m.Size() + +func (x *Container) GetID() string { + if x != nil { + return x.ID + } + return "" } -func (m *Container) XXX_DiscardUnknown() { - xxx_messageInfo_Container.DiscardUnknown(m) + +func (x *Container) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil } -var xxx_messageInfo_Container proto.InternalMessageInfo +func (x *Container) GetImage() string { + if x != nil { + return x.Image + } + return "" +} -type Container_Runtime struct { - // Name is the name of the runtime. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Options specify additional runtime initialization options. - Options *types.Any `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *Container) GetRuntime() *Container_Runtime { + if x != nil { + return x.Runtime + } + return nil } -func (m *Container_Runtime) Reset() { *m = Container_Runtime{} } -func (*Container_Runtime) ProtoMessage() {} -func (*Container_Runtime) Descriptor() ([]byte, []int) { - return fileDescriptor_311afb8e15951042, []int{0, 1} -} -func (m *Container_Runtime) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Container_Runtime) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Container_Runtime.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *Container) GetSpec() *anypb.Any { + if x != nil { + return x.Spec } + return nil +} + +func (x *Container) GetSnapshotter() string { + if x != nil { + return x.Snapshotter + } + return "" } -func (m *Container_Runtime) XXX_Merge(src proto.Message) { - xxx_messageInfo_Container_Runtime.Merge(m, src) + +func (x *Container) GetSnapshotKey() string { + if x != nil { + return x.SnapshotKey + } + return "" +} + +func (x *Container) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil } -func (m *Container_Runtime) XXX_Size() int { - return m.Size() + +func (x *Container) GetUpdatedAt() *timestamppb.Timestamp { + if x != nil { + return x.UpdatedAt + } + return nil } -func (m *Container_Runtime) XXX_DiscardUnknown() { - xxx_messageInfo_Container_Runtime.DiscardUnknown(m) + +func (x *Container) GetExtensions() map[string]*anypb.Any { + if x != nil { + return x.Extensions + } + return nil } -var xxx_messageInfo_Container_Runtime proto.InternalMessageInfo +func (x *Container) GetSandbox() string { + if x != nil { + return x.Sandbox + } + return "" +} type GetContainerRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *GetContainerRequest) Reset() { + *x = GetContainerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[1] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetContainerRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetContainerRequest) Reset() { *m = GetContainerRequest{} } func (*GetContainerRequest) ProtoMessage() {} -func (*GetContainerRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_311afb8e15951042, []int{1} -} -func (m *GetContainerRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetContainerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetContainerRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *GetContainerRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *GetContainerRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetContainerRequest.Merge(m, src) + +// Deprecated: Use GetContainerRequest.ProtoReflect.Descriptor instead. +func (*GetContainerRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{1} } -func (m *GetContainerRequest) XXX_Size() int { - return m.Size() + +func (x *GetContainerRequest) GetID() string { + if x != nil { + return x.ID + } + return "" } -func (m *GetContainerRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetContainerRequest.DiscardUnknown(m) + +type GetContainerResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Container *Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container,omitempty"` } -var xxx_messageInfo_GetContainerRequest proto.InternalMessageInfo +func (x *GetContainerResponse) Reset() { + *x = GetContainerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type GetContainerResponse struct { - Container Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *GetContainerResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetContainerResponse) Reset() { *m = GetContainerResponse{} } func (*GetContainerResponse) ProtoMessage() {} -func (*GetContainerResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_311afb8e15951042, []int{2} -} -func (m *GetContainerResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetContainerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetContainerResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *GetContainerResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[2] + if 
protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *GetContainerResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetContainerResponse.Merge(m, src) -} -func (m *GetContainerResponse) XXX_Size() int { - return m.Size() -} -func (m *GetContainerResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetContainerResponse.DiscardUnknown(m) + +// Deprecated: Use GetContainerResponse.ProtoReflect.Descriptor instead. +func (*GetContainerResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{2} } -var xxx_messageInfo_GetContainerResponse proto.InternalMessageInfo +func (x *GetContainerResponse) GetContainer() *Container { + if x != nil { + return x.Container + } + return nil +} type ListContainersRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Filters contains one or more filters using the syntax defined in the // containerd filter package. // @@ -246,163 +310,191 @@ type ListContainersRequest struct { // filters. Expanded, containers that match the following will be // returned: // - // filters[0] or filters[1] or ... or filters[n-1] or filters[n] + // filters[0] or filters[1] or ... or filters[n-1] or filters[n] // // If filters is zero-length or nil, all items will be returned. - Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` +} + +func (x *ListContainersRequest) Reset() { + *x = ListContainersRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListContainersRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ListContainersRequest) Reset() { *m = ListContainersRequest{} } func (*ListContainersRequest) ProtoMessage() {} -func (*ListContainersRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_311afb8e15951042, []int{3} -} -func (m *ListContainersRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListContainersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListContainersRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *ListContainersRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *ListContainersRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListContainersRequest.Merge(m, src) + +// Deprecated: Use ListContainersRequest.ProtoReflect.Descriptor instead. 
+func (*ListContainersRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{3} } -func (m *ListContainersRequest) XXX_Size() int { - return m.Size() + +func (x *ListContainersRequest) GetFilters() []string { + if x != nil { + return x.Filters + } + return nil } -func (m *ListContainersRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListContainersRequest.DiscardUnknown(m) + +type ListContainersResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Containers []*Container `protobuf:"bytes,1,rep,name=containers,proto3" json:"containers,omitempty"` } -var xxx_messageInfo_ListContainersRequest proto.InternalMessageInfo +func (x *ListContainersResponse) Reset() { + *x = ListContainersResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type ListContainersResponse struct { - Containers []Container `protobuf:"bytes,1,rep,name=containers,proto3" json:"containers"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *ListContainersResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ListContainersResponse) Reset() { *m = ListContainersResponse{} } func (*ListContainersResponse) ProtoMessage() {} -func (*ListContainersResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_311afb8e15951042, []int{4} -} -func (m *ListContainersResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListContainersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListContainersResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *ListContainersResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *ListContainersResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListContainersResponse.Merge(m, src) + +// Deprecated: Use ListContainersResponse.ProtoReflect.Descriptor instead. 
+func (*ListContainersResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{4} } -func (m *ListContainersResponse) XXX_Size() int { - return m.Size() + +func (x *ListContainersResponse) GetContainers() []*Container { + if x != nil { + return x.Containers + } + return nil } -func (m *ListContainersResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListContainersResponse.DiscardUnknown(m) + +type CreateContainerRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Container *Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container,omitempty"` } -var xxx_messageInfo_ListContainersResponse proto.InternalMessageInfo +func (x *CreateContainerRequest) Reset() { + *x = CreateContainerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type CreateContainerRequest struct { - Container Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *CreateContainerRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *CreateContainerRequest) Reset() { *m = CreateContainerRequest{} } func (*CreateContainerRequest) ProtoMessage() {} -func (*CreateContainerRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_311afb8e15951042, []int{5} -} -func (m *CreateContainerRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CreateContainerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CreateContainerRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *CreateContainerRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *CreateContainerRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateContainerRequest.Merge(m, src) + +// Deprecated: Use CreateContainerRequest.ProtoReflect.Descriptor instead. 
+func (*CreateContainerRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{5} } -func (m *CreateContainerRequest) XXX_Size() int { - return m.Size() + +func (x *CreateContainerRequest) GetContainer() *Container { + if x != nil { + return x.Container + } + return nil } -func (m *CreateContainerRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CreateContainerRequest.DiscardUnknown(m) + +type CreateContainerResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Container *Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container,omitempty"` } -var xxx_messageInfo_CreateContainerRequest proto.InternalMessageInfo +func (x *CreateContainerResponse) Reset() { + *x = CreateContainerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type CreateContainerResponse struct { - Container Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *CreateContainerResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *CreateContainerResponse) Reset() { *m = CreateContainerResponse{} } func (*CreateContainerResponse) ProtoMessage() {} -func (*CreateContainerResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_311afb8e15951042, []int{6} -} -func (m *CreateContainerResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CreateContainerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CreateContainerResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *CreateContainerResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *CreateContainerResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateContainerResponse.Merge(m, src) -} -func (m *CreateContainerResponse) XXX_Size() int { - return m.Size() -} -func (m *CreateContainerResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CreateContainerResponse.DiscardUnknown(m) + +// Deprecated: Use CreateContainerResponse.ProtoReflect.Descriptor instead. +func (*CreateContainerResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{6} } -var xxx_messageInfo_CreateContainerResponse proto.InternalMessageInfo +func (x *CreateContainerResponse) GetContainer() *Container { + if x != nil { + return x.Container + } + return nil +} // UpdateContainerRequest updates the metadata on one or more container. 
// @@ -410,3175 +502,677 @@ var xxx_messageInfo_CreateContainerResponse proto.InternalMessageInfo // https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-mask, // unless otherwise qualified. type UpdateContainerRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Container provides the target values, as declared by the mask, for the update. // // The ID field must be set. - Container Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container"` + Container *Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container,omitempty"` // UpdateMask specifies which fields to perform the update on. If empty, // the operation applies to all fields. - UpdateMask *types.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` } -func (m *UpdateContainerRequest) Reset() { *m = UpdateContainerRequest{} } -func (*UpdateContainerRequest) ProtoMessage() {} -func (*UpdateContainerRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_311afb8e15951042, []int{7} -} -func (m *UpdateContainerRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UpdateContainerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UpdateContainerRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *UpdateContainerRequest) Reset() { + *x = UpdateContainerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *UpdateContainerRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateContainerRequest.Merge(m, src) -} -func (m *UpdateContainerRequest) XXX_Size() int { - return m.Size() -} -func (m *UpdateContainerRequest) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateContainerRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_UpdateContainerRequest proto.InternalMessageInfo -type UpdateContainerResponse struct { - Container Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *UpdateContainerRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *UpdateContainerResponse) Reset() { *m = UpdateContainerResponse{} } -func (*UpdateContainerResponse) ProtoMessage() {} -func (*UpdateContainerResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_311afb8e15951042, []int{8} -} -func (m *UpdateContainerResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UpdateContainerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UpdateContainerResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*UpdateContainerRequest) ProtoMessage() {} + +func (x 
*UpdateContainerRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *UpdateContainerResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateContainerResponse.Merge(m, src) -} -func (m *UpdateContainerResponse) XXX_Size() int { - return m.Size() -} -func (m *UpdateContainerResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateContainerResponse.DiscardUnknown(m) + return mi.MessageOf(x) } -var xxx_messageInfo_UpdateContainerResponse proto.InternalMessageInfo - -type DeleteContainerRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +// Deprecated: Use UpdateContainerRequest.ProtoReflect.Descriptor instead. +func (*UpdateContainerRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{7} } -func (m *DeleteContainerRequest) Reset() { *m = DeleteContainerRequest{} } -func (*DeleteContainerRequest) ProtoMessage() {} -func (*DeleteContainerRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_311afb8e15951042, []int{9} -} -func (m *DeleteContainerRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteContainerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteContainerRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *UpdateContainerRequest) GetContainer() *Container { + if x != nil { + return x.Container } + return nil } -func (m *DeleteContainerRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteContainerRequest.Merge(m, src) -} -func (m *DeleteContainerRequest) XXX_Size() int { - return m.Size() -} -func (m *DeleteContainerRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteContainerRequest.DiscardUnknown(m) + +func (x *UpdateContainerRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil } -var xxx_messageInfo_DeleteContainerRequest proto.InternalMessageInfo +type UpdateContainerResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -type ListContainerMessage struct { - Container *Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Container *Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container,omitempty"` } -func (m *ListContainerMessage) Reset() { *m = ListContainerMessage{} } -func (*ListContainerMessage) ProtoMessage() {} -func (*ListContainerMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_311afb8e15951042, []int{10} -} -func (m *ListContainerMessage) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListContainerMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListContainerMessage.Marshal(b, m, deterministic) - } 
else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *UpdateContainerResponse) Reset() { + *x = UpdateContainerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ListContainerMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListContainerMessage.Merge(m, src) -} -func (m *ListContainerMessage) XXX_Size() int { - return m.Size() -} -func (m *ListContainerMessage) XXX_DiscardUnknown() { - xxx_messageInfo_ListContainerMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_ListContainerMessage proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Container)(nil), "containerd.services.containers.v1.Container") - proto.RegisterMapType((map[string]types.Any)(nil), "containerd.services.containers.v1.Container.ExtensionsEntry") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.containers.v1.Container.LabelsEntry") - proto.RegisterType((*Container_Runtime)(nil), "containerd.services.containers.v1.Container.Runtime") - proto.RegisterType((*GetContainerRequest)(nil), "containerd.services.containers.v1.GetContainerRequest") - proto.RegisterType((*GetContainerResponse)(nil), "containerd.services.containers.v1.GetContainerResponse") - proto.RegisterType((*ListContainersRequest)(nil), "containerd.services.containers.v1.ListContainersRequest") - proto.RegisterType((*ListContainersResponse)(nil), "containerd.services.containers.v1.ListContainersResponse") - proto.RegisterType((*CreateContainerRequest)(nil), "containerd.services.containers.v1.CreateContainerRequest") - proto.RegisterType((*CreateContainerResponse)(nil), "containerd.services.containers.v1.CreateContainerResponse") - proto.RegisterType((*UpdateContainerRequest)(nil), "containerd.services.containers.v1.UpdateContainerRequest") - proto.RegisterType((*UpdateContainerResponse)(nil), "containerd.services.containers.v1.UpdateContainerResponse") - proto.RegisterType((*DeleteContainerRequest)(nil), "containerd.services.containers.v1.DeleteContainerRequest") - proto.RegisterType((*ListContainerMessage)(nil), "containerd.services.containers.v1.ListContainerMessage") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/containers/v1/containers.proto", fileDescriptor_311afb8e15951042) -} - -var fileDescriptor_311afb8e15951042 = []byte{ - // 820 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcb, 0x6e, 0x13, 0x49, - 0x14, 0x75, 0xdb, 0x4e, 0x3b, 0xbe, 0x1e, 0x69, 0x46, 0x35, 0x1e, 0x4f, 0x4f, 0x8f, 0x64, 0x3b, - 0x5e, 0x59, 0xa3, 0xa1, 0x9d, 0x18, 0x44, 0x5e, 0x6c, 0xe2, 0xbc, 0x04, 0x24, 0x28, 0xea, 0x80, - 0x84, 0x60, 0x11, 0xda, 0x76, 0xc5, 0x69, 0xdc, 0x2f, 0xba, 0xca, 0x16, 0x16, 0x8b, 0xc0, 0x1f, - 0xb0, 0xe3, 0x13, 0xf8, 0x95, 0x2c, 0x59, 0xb2, 0x0a, 0xc4, 0xe2, 0x43, 0x50, 0x57, 0x57, 0xbb, - 0x3b, 0x7e, 0x80, 0x9d, 0x90, 0x5d, 0x5d, 0xd7, 0x3d, 0xf7, 0x9e, 0x3a, 0xb7, 0x4e, 0xb9, 0x61, - 0xaf, 0xa5, 0xd3, 0x93, 0x4e, 0x5d, 0x69, 0xd8, 0x66, 0xa5, 0x61, 0x5b, 0x54, 0xd3, 0x2d, 0xec, - 0x36, 0xa3, 0x4b, 0xcd, 0xd1, 0x2b, 0x04, 0xbb, 0x5d, 0xbd, 0x81, 0x49, 0xf8, 0x3b, 0xa9, 0x74, - 0x97, 0x22, 0x91, 0xe2, 0xb8, 0x36, 0xb5, 0xd1, 0x42, 0x88, 0x53, 0x02, 0x8c, 0x12, 0xc9, 0xea, - 0x2e, 0xc9, 0xd9, 0x96, 0xdd, 0xb2, 0x59, 0x76, 0xc5, 0x5b, 0xf9, 0x40, 
0xf9, 0x9f, 0x96, 0x6d, - 0xb7, 0x0c, 0x5c, 0x61, 0x51, 0xbd, 0x73, 0x5c, 0xd1, 0xac, 0x1e, 0xdf, 0xfa, 0x77, 0x78, 0x0b, - 0x9b, 0x0e, 0x0d, 0x36, 0x8b, 0xc3, 0x9b, 0xc7, 0x3a, 0x36, 0x9a, 0x47, 0xa6, 0x46, 0xda, 0x3c, - 0xa3, 0x30, 0x9c, 0x41, 0x75, 0x13, 0x13, 0xaa, 0x99, 0x8e, 0x9f, 0x50, 0xfa, 0x20, 0x42, 0x7a, - 0x33, 0xa0, 0x88, 0x72, 0x10, 0xd7, 0x9b, 0x92, 0x50, 0x14, 0xca, 0xe9, 0x9a, 0xd8, 0x3f, 0x2f, - 0xc4, 0xef, 0x6f, 0xa9, 0x71, 0xbd, 0x89, 0x0e, 0x40, 0x34, 0xb4, 0x3a, 0x36, 0x88, 0x14, 0x2f, - 0x26, 0xca, 0x99, 0xea, 0x8a, 0xf2, 0xd3, 0xa3, 0x2a, 0x83, 0xaa, 0xca, 0x1e, 0x83, 0x6e, 0x5b, - 0xd4, 0xed, 0xa9, 0xbc, 0x0e, 0xca, 0xc2, 0x9c, 0x6e, 0x6a, 0x2d, 0x2c, 0x25, 0xbc, 0x66, 0xaa, - 0x1f, 0xa0, 0x47, 0x90, 0x72, 0x3b, 0x96, 0xc7, 0x51, 0x4a, 0x16, 0x85, 0x72, 0xa6, 0x7a, 0x67, - 0xa6, 0x46, 0xaa, 0x8f, 0x55, 0x83, 0x22, 0xa8, 0x0c, 0x49, 0xe2, 0xe0, 0x86, 0x34, 0xc7, 0x8a, - 0x65, 0x15, 0x5f, 0x0d, 0x25, 0x50, 0x43, 0xd9, 0xb0, 0x7a, 0x2a, 0xcb, 0x40, 0x45, 0xc8, 0x10, - 0x4b, 0x73, 0xc8, 0x89, 0x4d, 0x29, 0x76, 0x25, 0x91, 0xb1, 0x8a, 0xfe, 0x84, 0x16, 0xe0, 0xb7, - 0x20, 0x3c, 0x6a, 0xe3, 0x9e, 0x94, 0xba, 0x9c, 0xf2, 0x10, 0xf7, 0xd0, 0x26, 0x40, 0xc3, 0xc5, - 0x1a, 0xc5, 0xcd, 0x23, 0x8d, 0x4a, 0xf3, 0xac, 0xa9, 0x3c, 0xd2, 0xf4, 0x71, 0x30, 0x82, 0xda, - 0xfc, 0xd9, 0x79, 0x21, 0xf6, 0xfe, 0x4b, 0x41, 0x50, 0xd3, 0x1c, 0xb7, 0x41, 0xbd, 0x22, 0x1d, - 0xa7, 0x19, 0x14, 0x49, 0xcf, 0x52, 0x84, 0xe3, 0x36, 0x28, 0xaa, 0x03, 0xe0, 0xd7, 0x14, 0x5b, - 0x44, 0xb7, 0x2d, 0x22, 0x01, 0x1b, 0xda, 0xbd, 0x99, 0xb4, 0xdc, 0x1e, 0xc0, 0xd9, 0xe0, 0x6a, - 0x49, 0xaf, 0x8d, 0x1a, 0xa9, 0x2a, 0xaf, 0x42, 0x26, 0x32, 0x59, 0xf4, 0x07, 0x24, 0x3c, 0x59, - 0xd8, 0xe5, 0x51, 0xbd, 0xa5, 0x37, 0xe3, 0xae, 0x66, 0x74, 0xb0, 0x14, 0xf7, 0x67, 0xcc, 0x82, - 0xb5, 0xf8, 0x8a, 0x20, 0xef, 0x43, 0x8a, 0xcf, 0x0a, 0x21, 0x48, 0x5a, 0x9a, 0x89, 0x39, 0x8e, - 0xad, 0x91, 0x02, 0x29, 0xdb, 0xa1, 0x8c, 0x7a, 0xfc, 0x07, 0x93, 0x0b, 0x92, 0xe4, 0x43, 0xf8, - 0x7d, 0x88, 0xee, 0x18, 0x36, 0xff, 0x45, 0xd9, 0x4c, 0x2a, 0x19, 0x72, 0x2c, 0xdd, 0x82, 0x3f, - 0x77, 0x31, 0x1d, 0x08, 0xa2, 0xe2, 0x57, 0x1d, 0x4c, 0xe8, 0x24, 0x8b, 0x94, 0x4e, 0x20, 0x7b, - 0x39, 0x9d, 0x38, 0xb6, 0x45, 0x30, 0x3a, 0x80, 0xf4, 0x40, 0x62, 0x06, 0xcb, 0x54, 0xff, 0x9f, - 0x65, 0x10, 0x5c, 0xf8, 0xb0, 0x48, 0x69, 0x09, 0xfe, 0xda, 0xd3, 0x49, 0xd8, 0x8a, 0x04, 0xd4, - 0x24, 0x48, 0x1d, 0xeb, 0x06, 0xc5, 0x2e, 0x91, 0x84, 0x62, 0xa2, 0x9c, 0x56, 0x83, 0xb0, 0x64, - 0x40, 0x6e, 0x18, 0xc2, 0xe9, 0xa9, 0x00, 0x61, 0x63, 0x06, 0xbb, 0x1a, 0xbf, 0x48, 0x95, 0xd2, - 0x4b, 0xc8, 0x6d, 0xb2, 0xeb, 0x3c, 0x22, 0xde, 0xaf, 0x17, 0xa3, 0x0d, 0x7f, 0x8f, 0xf4, 0xba, - 0x31, 0xe5, 0x3f, 0x0a, 0x90, 0x7b, 0xc2, 0x3c, 0x76, 0xf3, 0x27, 0x43, 0xeb, 0x90, 0xf1, 0xfd, - 0xcc, 0xde, 0x73, 0x7e, 0x6b, 0x47, 0x1f, 0x82, 0x1d, 0xef, 0xc9, 0xdf, 0xd7, 0x48, 0x5b, 0xe5, - 0xcf, 0x86, 0xb7, 0xf6, 0x64, 0x19, 0x21, 0x7a, 0x63, 0xb2, 0x2c, 0x42, 0x6e, 0x0b, 0x1b, 0x78, - 0x8c, 0x2a, 0x93, 0xcc, 0x52, 0x87, 0xec, 0xa5, 0xfb, 0xb8, 0x8f, 0x09, 0xf1, 0xde, 0xff, 0x07, - 0xd7, 0xe4, 0x16, 0x61, 0x55, 0xfd, 0x36, 0x07, 0x10, 0x5e, 0x78, 0xd4, 0x85, 0xc4, 0x2e, 0xa6, - 0xe8, 0xee, 0x14, 0xe5, 0xc6, 0xd8, 0x5e, 0x5e, 0x9e, 0x19, 0xc7, 0xe5, 0x7e, 0x03, 0x49, 0xef, - 0xa8, 0x68, 0x9a, 0xbf, 0xcc, 0xb1, 0xb6, 0x96, 0x57, 0xaf, 0x80, 0xe4, 0xcd, 0xdf, 0x09, 0x00, - 0xde, 0xd6, 0x21, 0x75, 0xb1, 0x66, 0x5e, 0x83, 0xc3, 0xf2, 0xac, 0x48, 0x3e, 0xd1, 0x45, 0x01, - 0x9d, 0x82, 0xe8, 0x3b, 0x14, 0x4d, 0x73, 0x90, 0xf1, 0x0f, 0x87, 0xbc, 0x76, 0x15, 0x28, 0x17, - 
0xe1, 0x14, 0x44, 0xdf, 0x0b, 0x53, 0x11, 0x18, 0xef, 0xef, 0xa9, 0x08, 0x4c, 0x72, 0xdc, 0x73, - 0x10, 0x7d, 0x7f, 0x4c, 0x45, 0x60, 0xbc, 0x95, 0xe4, 0xdc, 0x88, 0xf3, 0xb7, 0xbd, 0x2f, 0xc1, - 0xda, 0x8b, 0xb3, 0x8b, 0x7c, 0xec, 0xf3, 0x45, 0x3e, 0xf6, 0xb6, 0x9f, 0x17, 0xce, 0xfa, 0x79, - 0xe1, 0x53, 0x3f, 0x2f, 0x7c, 0xed, 0xe7, 0x85, 0x67, 0x3b, 0xd7, 0xf8, 0xb8, 0x5d, 0x0f, 0xa3, - 0xa7, 0xb1, 0xba, 0xc8, 0x7a, 0xde, 0xfe, 0x1e, 0x00, 0x00, 0xff, 0xff, 0xd0, 0xae, 0xca, 0xcb, - 0x2f, 0x0b, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// ContainersClient is the client API for Containers service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ContainersClient interface { - Get(ctx context.Context, in *GetContainerRequest, opts ...grpc.CallOption) (*GetContainerResponse, error) - List(ctx context.Context, in *ListContainersRequest, opts ...grpc.CallOption) (*ListContainersResponse, error) - ListStream(ctx context.Context, in *ListContainersRequest, opts ...grpc.CallOption) (Containers_ListStreamClient, error) - Create(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) - Update(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error) - Delete(ctx context.Context, in *DeleteContainerRequest, opts ...grpc.CallOption) (*types.Empty, error) -} -type containersClient struct { - cc *grpc.ClientConn +func (x *UpdateContainerResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func NewContainersClient(cc *grpc.ClientConn) ContainersClient { - return &containersClient{cc} -} +func (*UpdateContainerResponse) ProtoMessage() {} -func (c *containersClient) Get(ctx context.Context, in *GetContainerRequest, opts ...grpc.CallOption) (*GetContainerResponse, error) { - out := new(GetContainerResponse) - err := c.cc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Get", in, out, opts...) - if err != nil { - return nil, err +func (x *UpdateContainerResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return out, nil + return mi.MessageOf(x) } -func (c *containersClient) List(ctx context.Context, in *ListContainersRequest, opts ...grpc.CallOption) (*ListContainersResponse, error) { - out := new(ListContainersResponse) - err := c.cc.Invoke(ctx, "/containerd.services.containers.v1.Containers/List", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +// Deprecated: Use UpdateContainerResponse.ProtoReflect.Descriptor instead. 
+func (*UpdateContainerResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{8} } -func (c *containersClient) ListStream(ctx context.Context, in *ListContainersRequest, opts ...grpc.CallOption) (Containers_ListStreamClient, error) { - stream, err := c.cc.NewStream(ctx, &_Containers_serviceDesc.Streams[0], "/containerd.services.containers.v1.Containers/ListStream", opts...) - if err != nil { - return nil, err +func (x *UpdateContainerResponse) GetContainer() *Container { + if x != nil { + return x.Container } - x := &containersListStreamClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil + return nil } -type Containers_ListStreamClient interface { - Recv() (*ListContainerMessage, error) - grpc.ClientStream -} +type DeleteContainerRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -type containersListStreamClient struct { - grpc.ClientStream + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` } -func (x *containersListStreamClient) Recv() (*ListContainerMessage, error) { - m := new(ListContainerMessage) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err +func (x *DeleteContainerRequest) Reset() { + *x = DeleteContainerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return m, nil } -func (c *containersClient) Create(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) { - out := new(CreateContainerResponse) - err := c.cc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Create", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +func (x *DeleteContainerRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (c *containersClient) Update(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error) { - out := new(UpdateContainerResponse) - err := c.cc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Update", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} +func (*DeleteContainerRequest) ProtoMessage() {} -func (c *containersClient) Delete(ctx context.Context, in *DeleteContainerRequest, opts ...grpc.CallOption) (*types.Empty, error) { - out := new(types.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Delete", in, out, opts...) - if err != nil { - return nil, err +func (x *DeleteContainerRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return out, nil + return mi.MessageOf(x) } -// ContainersServer is the server API for Containers service. 
-type ContainersServer interface { - Get(context.Context, *GetContainerRequest) (*GetContainerResponse, error) - List(context.Context, *ListContainersRequest) (*ListContainersResponse, error) - ListStream(*ListContainersRequest, Containers_ListStreamServer) error - Create(context.Context, *CreateContainerRequest) (*CreateContainerResponse, error) - Update(context.Context, *UpdateContainerRequest) (*UpdateContainerResponse, error) - Delete(context.Context, *DeleteContainerRequest) (*types.Empty, error) +// Deprecated: Use DeleteContainerRequest.ProtoReflect.Descriptor instead. +func (*DeleteContainerRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{9} } -// UnimplementedContainersServer can be embedded to have forward compatible implementations. -type UnimplementedContainersServer struct { +func (x *DeleteContainerRequest) GetID() string { + if x != nil { + return x.ID + } + return "" } -func (*UnimplementedContainersServer) Get(ctx context.Context, req *GetContainerRequest) (*GetContainerResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") -} -func (*UnimplementedContainersServer) List(ctx context.Context, req *ListContainersRequest) (*ListContainersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method List not implemented") -} -func (*UnimplementedContainersServer) ListStream(req *ListContainersRequest, srv Containers_ListStreamServer) error { - return status.Errorf(codes.Unimplemented, "method ListStream not implemented") -} -func (*UnimplementedContainersServer) Create(ctx context.Context, req *CreateContainerRequest) (*CreateContainerResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") -} -func (*UnimplementedContainersServer) Update(ctx context.Context, req *UpdateContainerRequest) (*UpdateContainerResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") -} -func (*UnimplementedContainersServer) Delete(ctx context.Context, req *DeleteContainerRequest) (*types.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") -} +type ListContainerMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func RegisterContainersServer(s *grpc.Server, srv ContainersServer) { - s.RegisterService(&_Containers_serviceDesc, srv) + Container *Container `protobuf:"bytes,1,opt,name=container,proto3" json:"container,omitempty"` } -func _Containers_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetContainerRequest) - if err := dec(in); err != nil { - return nil, err +func (x *ListContainerMessage) Reset() { + *x = ListContainerMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - if interceptor == nil { - return srv.(ContainersServer).Get(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.containers.v1.Containers/Get", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ContainersServer).Get(ctx, req.(*GetContainerRequest)) - } - return interceptor(ctx, in, info, handler) } 
-func _Containers_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListContainersRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ContainersServer).List(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.containers.v1.Containers/List", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ContainersServer).List(ctx, req.(*ListContainersRequest)) - } - return interceptor(ctx, in, info, handler) +func (x *ListContainerMessage) String() string { + return protoimpl.X.MessageStringOf(x) } -func _Containers_ListStream_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(ListContainersRequest) - if err := stream.RecvMsg(m); err != nil { - return err +func (*ListContainerMessage) ProtoMessage() {} + +func (x *ListContainerMessage) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return srv.(ContainersServer).ListStream(m, &containersListStreamServer{stream}) + return mi.MessageOf(x) } -type Containers_ListStreamServer interface { - Send(*ListContainerMessage) error - grpc.ServerStream +// Deprecated: Use ListContainerMessage.ProtoReflect.Descriptor instead. +func (*ListContainerMessage) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{10} } -type containersListStreamServer struct { - grpc.ServerStream +func (x *ListContainerMessage) GetContainer() *Container { + if x != nil { + return x.Container + } + return nil } -func (x *containersListStreamServer) Send(m *ListContainerMessage) error { - return x.ServerStream.SendMsg(m) -} +type Container_Runtime struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func _Containers_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateContainerRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ContainersServer).Create(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.containers.v1.Containers/Create", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ContainersServer).Create(ctx, req.(*CreateContainerRequest)) - } - return interceptor(ctx, in, info, handler) + // Name is the name of the runtime. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Options specify additional runtime initialization options. 
+ Options *anypb.Any `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` } -func _Containers_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateContainerRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ContainersServer).Update(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.containers.v1.Containers/Update", +func (x *Container_Runtime) Reset() { + *x = Container_Runtime{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ContainersServer).Update(ctx, req.(*UpdateContainerRequest)) - } - return interceptor(ctx, in, info, handler) } -func _Containers_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteContainerRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ContainersServer).Delete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.containers.v1.Containers/Delete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ContainersServer).Delete(ctx, req.(*DeleteContainerRequest)) - } - return interceptor(ctx, in, info, handler) +func (x *Container_Runtime) String() string { + return protoimpl.X.MessageStringOf(x) } -var _Containers_serviceDesc = grpc.ServiceDesc{ - ServiceName: "containerd.services.containers.v1.Containers", - HandlerType: (*ContainersServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Get", - Handler: _Containers_Get_Handler, - }, - { - MethodName: "List", - Handler: _Containers_List_Handler, - }, - { - MethodName: "Create", - Handler: _Containers_Create_Handler, - }, - { - MethodName: "Update", - Handler: _Containers_Update_Handler, - }, - { - MethodName: "Delete", - Handler: _Containers_Delete_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "ListStream", - Handler: _Containers_ListStream_Handler, - ServerStreams: true, - }, - }, - Metadata: "github.com/containerd/containerd/api/services/containers/v1/containers.proto", -} +func (*Container_Runtime) ProtoMessage() {} -func (m *Container) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *Container_Runtime) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *Container) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use Container_Runtime.ProtoReflect.Descriptor instead. 
+func (*Container_Runtime) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDescGZIP(), []int{0, 1} } -func (m *Container) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Extensions) > 0 { - for k := range m.Extensions { - v := m.Extensions[k] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContainers(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintContainers(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintContainers(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x52 - } - } - n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt):]) - if err2 != nil { - return 0, err2 - } - i -= n2 - i = encodeVarintContainers(dAtA, i, uint64(n2)) - i-- - dAtA[i] = 0x4a - n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt):]) - if err3 != nil { - return 0, err3 - } - i -= n3 - i = encodeVarintContainers(dAtA, i, uint64(n3)) - i-- - dAtA[i] = 0x42 - if len(m.SnapshotKey) > 0 { - i -= len(m.SnapshotKey) - copy(dAtA[i:], m.SnapshotKey) - i = encodeVarintContainers(dAtA, i, uint64(len(m.SnapshotKey))) - i-- - dAtA[i] = 0x3a - } - if len(m.Snapshotter) > 0 { - i -= len(m.Snapshotter) - copy(dAtA[i:], m.Snapshotter) - i = encodeVarintContainers(dAtA, i, uint64(len(m.Snapshotter))) - i-- - dAtA[i] = 0x32 - } - if m.Spec != nil { - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContainers(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.Runtime != nil { - { - size, err := m.Runtime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContainers(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if len(m.Image) > 0 { - i -= len(m.Image) - copy(dAtA[i:], m.Image) - i = encodeVarintContainers(dAtA, i, uint64(len(m.Image))) - i-- - dAtA[i] = 0x1a - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintContainers(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintContainers(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintContainers(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintContainers(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa +func (x *Container_Runtime) GetName() string { + if x != nil { + return x.Name } - return len(dAtA) - i, nil + return "" } -func (m *Container_Runtime) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *Container_Runtime) GetOptions() *anypb.Any { + if x != nil { + return x.Options } - return dAtA[:n], nil -} - -func (m *Container_Runtime) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return nil } -func (m *Container_Runtime) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i 
:= len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Options != nil { - { - size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContainers(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintContainers(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetContainerRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetContainerRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetContainerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintContainers(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetContainerResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetContainerResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetContainerResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContainers(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ListContainersRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListContainersRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListContainersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Filters) > 0 { - for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Filters[iNdEx]) - copy(dAtA[i:], m.Filters[iNdEx]) - i = encodeVarintContainers(dAtA, i, uint64(len(m.Filters[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ListContainersResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListContainersResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListContainersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if 
m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Containers) > 0 { - for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContainers(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *CreateContainerRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CreateContainerRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CreateContainerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContainers(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CreateContainerResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CreateContainerResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CreateContainerResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContainers(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *UpdateContainerRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UpdateContainerRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UpdateContainerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.UpdateMask != nil { - { - size, err := m.UpdateMask.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContainers(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContainers(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *UpdateContainerResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UpdateContainerResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m 
*UpdateContainerResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContainers(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *DeleteContainerRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteContainerRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteContainerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintContainers(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ListContainerMessage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListContainerMessage) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListContainerMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Container != nil { - { - size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContainers(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintContainers(dAtA []byte, offset int, v uint64) int { - offset -= sovContainers(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Container) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovContainers(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovContainers(uint64(len(k))) + 1 + len(v) + sovContainers(uint64(len(v))) - n += mapEntrySize + 1 + sovContainers(uint64(mapEntrySize)) - } - } - l = len(m.Image) - if l > 0 { - n += 1 + l + sovContainers(uint64(l)) - } - if m.Runtime != nil { - l = m.Runtime.Size() - n += 1 + l + sovContainers(uint64(l)) - } - if m.Spec != nil { - l = m.Spec.Size() - n += 1 + l + sovContainers(uint64(l)) - } - l = len(m.Snapshotter) - if l > 0 { - n += 1 + l + sovContainers(uint64(l)) - } - l = len(m.SnapshotKey) - if l > 0 { - n += 1 + l + sovContainers(uint64(l)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt) - n += 1 + l + sovContainers(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt) - n += 1 + l + sovContainers(uint64(l)) - if len(m.Extensions) > 0 { - for k, v := range m.Extensions { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovContainers(uint64(len(k))) + 1 + l + sovContainers(uint64(l)) - 
[vendored generated code for the containerd.services.containers.v1 API (github.com/containerd/containerd/api/services/containers/v1/containers.proto), regenerated from gogo/protobuf output to google.golang.org/protobuf output:
 removed: the gogo-generated Size(), String(), and Unmarshal() methods for Container, Container_Runtime, GetContainerRequest/Response, ListContainersRequest/Response, CreateContainerRequest/Response, UpdateContainerRequest/Response, DeleteContainerRequest, and ListContainerMessage, together with the sovContainers, sozContainers, valueToStringContainers, and skipContainers helpers and the ErrInvalidLengthContainers, ErrIntOverflowContainers, and ErrUnexpectedEndOfGroupContainers variables
 added: the protoimpl/protoreflect file state — File_github_com_containerd_containerd_api_services_containers_v1_containers_proto and its rawDesc byte slice, rawDescOnce/rawDescData with rawDescGZIP(), fourteen protoimpl.MessageInfo msgTypes, the goTypes and depIdxs tables, and the file init()/..._containers_proto_init() that installs per-message field exporters]
file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateContainerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteContainerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListContainerMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Container_Runtime); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDesc, + NumEnums: 0, + NumMessages: 14, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_services_containers_v1_containers_proto = out.File + file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_rawDesc = nil + file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_goTypes = nil + file_github_com_containerd_containerd_api_services_containers_v1_containers_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto index 36ab177de7781..3de07ffbd6e6a 100644 --- a/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto +++ b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto @@ -18,7 +18,6 @@ syntax = "proto3"; package containerd.services.containers.v1; -import weak "gogoproto/gogo.proto"; import "google/protobuf/any.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/field_mask.proto"; @@ -99,10 +98,10 @@ message Container { string snapshot_key = 7; // CreatedAt is the time the container was first created. - google.protobuf.Timestamp created_at = 8 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp created_at = 8; // UpdatedAt is the last time the container was mutated. - google.protobuf.Timestamp updated_at = 9 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp updated_at = 9; // Extensions allow clients to provide zero or more blobs that are directly // associated with the container. 
One may provide protobuf, json, or other @@ -113,7 +112,10 @@ message Container { // that should be unique against other extensions. When updating extension // data, one should only update the specified extension using field paths // to select a specific map key. - map extensions = 10 [(gogoproto.nullable) = false]; + map extensions = 10; + + // Sandbox ID this container belongs to. + string sandbox = 11; } message GetContainerRequest { @@ -121,7 +123,7 @@ message GetContainerRequest { } message GetContainerResponse { - Container container = 1 [(gogoproto.nullable) = false]; + Container container = 1; } message ListContainersRequest { @@ -132,22 +134,22 @@ message ListContainersRequest { // filters. Expanded, containers that match the following will be // returned: // - // filters[0] or filters[1] or ... or filters[n-1] or filters[n] + // filters[0] or filters[1] or ... or filters[n-1] or filters[n] // // If filters is zero-length or nil, all items will be returned. repeated string filters = 1; } message ListContainersResponse { - repeated Container containers = 1 [(gogoproto.nullable) = false]; + repeated Container containers = 1; } message CreateContainerRequest { - Container container = 1 [(gogoproto.nullable) = false]; + Container container = 1; } message CreateContainerResponse { - Container container = 1 [(gogoproto.nullable) = false]; + Container container = 1; } // UpdateContainerRequest updates the metadata on one or more container. @@ -159,7 +161,7 @@ message UpdateContainerRequest { // Container provides the target values, as declared by the mask, for the update. // // The ID field must be set. - Container container = 1 [(gogoproto.nullable) = false]; + Container container = 1; // UpdateMask specifies which fields to perform the update on. If empty, // the operation applies to all fields. @@ -167,7 +169,7 @@ message UpdateContainerRequest { } message UpdateContainerResponse { - Container container = 1 [(gogoproto.nullable) = false]; + Container container = 1; } message DeleteContainerRequest { diff --git a/vendor/github.com/containerd/containerd/api/services/containers/v1/containers_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers_grpc.pb.go new file mode 100644 index 0000000000000..701e5c1ebce50 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers_grpc.pb.go @@ -0,0 +1,314 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/containers/v1/containers.proto + +package containers + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// ContainersClient is the client API for Containers service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type ContainersClient interface { + Get(ctx context.Context, in *GetContainerRequest, opts ...grpc.CallOption) (*GetContainerResponse, error) + List(ctx context.Context, in *ListContainersRequest, opts ...grpc.CallOption) (*ListContainersResponse, error) + ListStream(ctx context.Context, in *ListContainersRequest, opts ...grpc.CallOption) (Containers_ListStreamClient, error) + Create(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) + Update(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error) + Delete(ctx context.Context, in *DeleteContainerRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) +} + +type containersClient struct { + cc grpc.ClientConnInterface +} + +func NewContainersClient(cc grpc.ClientConnInterface) ContainersClient { + return &containersClient{cc} +} + +func (c *containersClient) Get(ctx context.Context, in *GetContainerRequest, opts ...grpc.CallOption) (*GetContainerResponse, error) { + out := new(GetContainerResponse) + err := c.cc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *containersClient) List(ctx context.Context, in *ListContainersRequest, opts ...grpc.CallOption) (*ListContainersResponse, error) { + out := new(ListContainersResponse) + err := c.cc.Invoke(ctx, "/containerd.services.containers.v1.Containers/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *containersClient) ListStream(ctx context.Context, in *ListContainersRequest, opts ...grpc.CallOption) (Containers_ListStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &Containers_ServiceDesc.Streams[0], "/containerd.services.containers.v1.Containers/ListStream", opts...) + if err != nil { + return nil, err + } + x := &containersListStreamClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Containers_ListStreamClient interface { + Recv() (*ListContainerMessage, error) + grpc.ClientStream +} + +type containersListStreamClient struct { + grpc.ClientStream +} + +func (x *containersListStreamClient) Recv() (*ListContainerMessage, error) { + m := new(ListContainerMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *containersClient) Create(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) { + out := new(CreateContainerResponse) + err := c.cc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *containersClient) Update(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error) { + out := new(UpdateContainerResponse) + err := c.cc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Update", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *containersClient) Delete(ctx context.Context, in *DeleteContainerRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Delete", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// ContainersServer is the server API for Containers service. +// All implementations must embed UnimplementedContainersServer +// for forward compatibility +type ContainersServer interface { + Get(context.Context, *GetContainerRequest) (*GetContainerResponse, error) + List(context.Context, *ListContainersRequest) (*ListContainersResponse, error) + ListStream(*ListContainersRequest, Containers_ListStreamServer) error + Create(context.Context, *CreateContainerRequest) (*CreateContainerResponse, error) + Update(context.Context, *UpdateContainerRequest) (*UpdateContainerResponse, error) + Delete(context.Context, *DeleteContainerRequest) (*emptypb.Empty, error) + mustEmbedUnimplementedContainersServer() +} + +// UnimplementedContainersServer must be embedded to have forward compatible implementations. +type UnimplementedContainersServer struct { +} + +func (UnimplementedContainersServer) Get(context.Context, *GetContainerRequest) (*GetContainerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (UnimplementedContainersServer) List(context.Context, *ListContainersRequest) (*ListContainersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method List not implemented") +} +func (UnimplementedContainersServer) ListStream(*ListContainersRequest, Containers_ListStreamServer) error { + return status.Errorf(codes.Unimplemented, "method ListStream not implemented") +} +func (UnimplementedContainersServer) Create(context.Context, *CreateContainerRequest) (*CreateContainerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") +} +func (UnimplementedContainersServer) Update(context.Context, *UpdateContainerRequest) (*UpdateContainerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") +} +func (UnimplementedContainersServer) Delete(context.Context, *DeleteContainerRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (UnimplementedContainersServer) mustEmbedUnimplementedContainersServer() {} + +// UnsafeContainersServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ContainersServer will +// result in compilation errors. 
+type UnsafeContainersServer interface { + mustEmbedUnimplementedContainersServer() +} + +func RegisterContainersServer(s grpc.ServiceRegistrar, srv ContainersServer) { + s.RegisterService(&Containers_ServiceDesc, srv) +} + +func _Containers_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetContainerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContainersServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.containers.v1.Containers/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContainersServer).Get(ctx, req.(*GetContainerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Containers_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListContainersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContainersServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.containers.v1.Containers/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContainersServer).List(ctx, req.(*ListContainersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Containers_ListStream_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ListContainersRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ContainersServer).ListStream(m, &containersListStreamServer{stream}) +} + +type Containers_ListStreamServer interface { + Send(*ListContainerMessage) error + grpc.ServerStream +} + +type containersListStreamServer struct { + grpc.ServerStream +} + +func (x *containersListStreamServer) Send(m *ListContainerMessage) error { + return x.ServerStream.SendMsg(m) +} + +func _Containers_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateContainerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContainersServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.containers.v1.Containers/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContainersServer).Create(ctx, req.(*CreateContainerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Containers_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateContainerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContainersServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.containers.v1.Containers/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContainersServer).Update(ctx, req.(*UpdateContainerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Containers_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) 
(interface{}, error) { + in := new(DeleteContainerRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContainersServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.containers.v1.Containers/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContainersServer).Delete(ctx, req.(*DeleteContainerRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Containers_ServiceDesc is the grpc.ServiceDesc for Containers service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Containers_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.containers.v1.Containers", + HandlerType: (*ContainersServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _Containers_Get_Handler, + }, + { + MethodName: "List", + Handler: _Containers_List_Handler, + }, + { + MethodName: "Create", + Handler: _Containers_Create_Handler, + }, + { + MethodName: "Update", + Handler: _Containers_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _Containers_Delete_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ListStream", + Handler: _Containers_ListStream_Handler, + ServerStreams: true, + }, + }, + Metadata: "github.com/containerd/containerd/api/services/containers/v1/containers.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go b/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go index df272237ccfe2..28366466288e8 100644 --- a/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go @@ -1,38 +1,42 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
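The new containers_grpc.pb.go above carries the client/server transport plumbing that protoc-gen-gogo previously emitted inline in containers.pb.go. A minimal sketch of driving the regenerated client follows; the socket path, namespace value, and dial options are illustrative assumptions, not part of this patch:

package main

import (
	"context"
	"fmt"
	"log"

	containers "github.com/containerd/containerd/api/services/containers/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/metadata"
)

func main() {
	// Illustrative socket path; containerd's default location on most Linux hosts.
	conn, err := grpc.Dial("unix:///run/containerd/containerd.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// containerd scopes requests by namespace carried in this gRPC header.
	ctx := metadata.AppendToOutgoingContext(context.Background(), "containerd-namespace", "moby")

	client := containers.NewContainersClient(conn)
	resp, err := client.List(ctx, &containers.ListContainersRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range resp.Containers {
		// With the gogoproto nullable option gone, Containers is a slice of pointers.
		fmt.Println(c)
	}
}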
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/services/content/v1/content.proto package content import ( - context "context" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types "github.com/gogo/protobuf/types" - github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - strings "strings" - time "time" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // WriteAction defines the behavior of a WriteRequest. type WriteAction int32 @@ -40,14 +44,14 @@ type WriteAction int32 const ( // WriteActionStat instructs the writer to return the current status while // holding the lock on the write. - WriteActionStat WriteAction = 0 + WriteAction_STAT WriteAction = 0 // WriteActionWrite sets the action for the write request to write data. // // Any data included will be written at the provided offset. The // transaction will be left open for further writes. // // This is the default. - WriteActionWrite WriteAction = 1 + WriteAction_WRITE WriteAction = 1 // WriteActionCommit will write any outstanding data in the message and // commit the write, storing it under the digest. // @@ -55,243 +59,343 @@ const ( // commit it. // // This action will always terminate the write. - WriteActionCommit WriteAction = 2 + WriteAction_COMMIT WriteAction = 2 ) -var WriteAction_name = map[int32]string{ - 0: "STAT", - 1: "WRITE", - 2: "COMMIT", -} +// Enum value maps for WriteAction. 
+var ( + WriteAction_name = map[int32]string{ + 0: "STAT", + 1: "WRITE", + 2: "COMMIT", + } + WriteAction_value = map[string]int32{ + "STAT": 0, + "WRITE": 1, + "COMMIT": 2, + } +) -var WriteAction_value = map[string]int32{ - "STAT": 0, - "WRITE": 1, - "COMMIT": 2, +func (x WriteAction) Enum() *WriteAction { + p := new(WriteAction) + *p = x + return p } func (x WriteAction) String() string { - return proto.EnumName(WriteAction_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (WriteAction) Descriptor() protoreflect.EnumDescriptor { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_enumTypes[0].Descriptor() +} + +func (WriteAction) Type() protoreflect.EnumType { + return &file_github_com_containerd_containerd_api_services_content_v1_content_proto_enumTypes[0] } +func (x WriteAction) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use WriteAction.Descriptor instead. func (WriteAction) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{0} + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{0} } type Info struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Digest is the hash identity of the blob. - Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` + Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"` // Size is the total number of bytes in the blob. - Size_ int64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"` + Size int64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"` // CreatedAt provides the time at which the blob was committed. - CreatedAt time.Time `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3,stdtime" json:"created_at"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` // UpdatedAt provides the time the info was last updated. - UpdatedAt time.Time `protobuf:"bytes,4,opt,name=updated_at,json=updatedAt,proto3,stdtime" json:"updated_at"` + UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` // Labels are arbitrary data on snapshots. // // The combined size of a key/value pair cannot exceed 4096 bytes. 
- Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Info) Reset() { + *x = Info{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Info) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Info) Reset() { *m = Info{} } func (*Info) ProtoMessage() {} -func (*Info) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{0} -} -func (m *Info) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Info) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Info.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *Info) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Info.ProtoReflect.Descriptor instead. +func (*Info) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{0} +} + +func (x *Info) GetDigest() string { + if x != nil { + return x.Digest } + return "" } -func (m *Info) XXX_Merge(src proto.Message) { - xxx_messageInfo_Info.Merge(m, src) + +func (x *Info) GetSize() int64 { + if x != nil { + return x.Size + } + return 0 } -func (m *Info) XXX_Size() int { - return m.Size() + +func (x *Info) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil } -func (m *Info) XXX_DiscardUnknown() { - xxx_messageInfo_Info.DiscardUnknown(m) + +func (x *Info) GetUpdatedAt() *timestamppb.Timestamp { + if x != nil { + return x.UpdatedAt + } + return nil } -var xxx_messageInfo_Info proto.InternalMessageInfo +func (x *Info) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} type InfoRequest struct { - Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"` +} + +func (x *InfoRequest) Reset() { + *x = InfoRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InfoRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m 
*InfoRequest) Reset() { *m = InfoRequest{} } func (*InfoRequest) ProtoMessage() {} -func (*InfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{1} -} -func (m *InfoRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *InfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_InfoRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *InfoRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *InfoRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_InfoRequest.Merge(m, src) + +// Deprecated: Use InfoRequest.ProtoReflect.Descriptor instead. +func (*InfoRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{1} } -func (m *InfoRequest) XXX_Size() int { - return m.Size() + +func (x *InfoRequest) GetDigest() string { + if x != nil { + return x.Digest + } + return "" } -func (m *InfoRequest) XXX_DiscardUnknown() { - xxx_messageInfo_InfoRequest.DiscardUnknown(m) + +type InfoResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Info *Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` } -var xxx_messageInfo_InfoRequest proto.InternalMessageInfo +func (x *InfoResponse) Reset() { + *x = InfoResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type InfoResponse struct { - Info Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *InfoResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *InfoResponse) Reset() { *m = InfoResponse{} } func (*InfoResponse) ProtoMessage() {} -func (*InfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{2} -} -func (m *InfoResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *InfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_InfoResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *InfoResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *InfoResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_InfoResponse.Merge(m, src) -} -func (m *InfoResponse) XXX_Size() int { - return m.Size() -} -func (m *InfoResponse) XXX_DiscardUnknown() { - xxx_messageInfo_InfoResponse.DiscardUnknown(m) + +// 
Deprecated: Use InfoResponse.ProtoReflect.Descriptor instead. +func (*InfoResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{2} } -var xxx_messageInfo_InfoResponse proto.InternalMessageInfo +func (x *InfoResponse) GetInfo() *Info { + if x != nil { + return x.Info + } + return nil +} type UpdateRequest struct { - Info Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Info *Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` // UpdateMask specifies which fields to perform the update on. If empty, // the operation applies to all fields. // // In info, Digest, Size, and CreatedAt are immutable, // other field may be updated using this mask. // If no mask is provided, all mutable field are updated. - UpdateMask *types.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateRequest) Reset() { + *x = UpdateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *UpdateRequest) Reset() { *m = UpdateRequest{} } func (*UpdateRequest) ProtoMessage() {} -func (*UpdateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{3} -} -func (m *UpdateRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UpdateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UpdateRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *UpdateRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *UpdateRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateRequest.Merge(m, src) -} -func (m *UpdateRequest) XXX_Size() int { - return m.Size() + +// Deprecated: Use UpdateRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{3} } -func (m *UpdateRequest) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateRequest.DiscardUnknown(m) + +func (x *UpdateRequest) GetInfo() *Info { + if x != nil { + return x.Info + } + return nil } -var xxx_messageInfo_UpdateRequest proto.InternalMessageInfo +func (x *UpdateRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} type UpdateResponse struct { - Info Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Info *Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` } -func (m *UpdateResponse) Reset() { *m = UpdateResponse{} } -func (*UpdateResponse) ProtoMessage() {} -func (*UpdateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{4} -} -func (m *UpdateResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UpdateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UpdateResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *UpdateResponse) Reset() { + *x = UpdateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *UpdateResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateResponse.Merge(m, src) + +func (x *UpdateResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *UpdateResponse) XXX_Size() int { - return m.Size() + +func (*UpdateResponse) ProtoMessage() {} + +func (x *UpdateResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *UpdateResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateResponse.DiscardUnknown(m) + +// Deprecated: Use UpdateResponse.ProtoReflect.Descriptor instead. +func (*UpdateResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{4} } -var xxx_messageInfo_UpdateResponse proto.InternalMessageInfo +func (x *UpdateResponse) GetInfo() *Info { + if x != nil { + return x.Info + } + return nil +} type ListContentRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Filters contains one or more filters using the syntax defined in the // containerd filter package. // @@ -302,415 +406,551 @@ type ListContentRequest struct { // filters[0] or filters[1] or ... or filters[n-1] or filters[n] // // If filters is zero-length or nil, all items will be returned. 
- Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` +} + +func (x *ListContentRequest) Reset() { + *x = ListContentRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListContentRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ListContentRequest) Reset() { *m = ListContentRequest{} } func (*ListContentRequest) ProtoMessage() {} -func (*ListContentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{5} -} -func (m *ListContentRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListContentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListContentRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *ListContentRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *ListContentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListContentRequest.Merge(m, src) + +// Deprecated: Use ListContentRequest.ProtoReflect.Descriptor instead. 
+func (*ListContentRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{5} } -func (m *ListContentRequest) XXX_Size() int { - return m.Size() + +func (x *ListContentRequest) GetFilters() []string { + if x != nil { + return x.Filters + } + return nil } -func (m *ListContentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListContentRequest.DiscardUnknown(m) + +type ListContentResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Info []*Info `protobuf:"bytes,1,rep,name=info,proto3" json:"info,omitempty"` } -var xxx_messageInfo_ListContentRequest proto.InternalMessageInfo +func (x *ListContentResponse) Reset() { + *x = ListContentResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type ListContentResponse struct { - Info []Info `protobuf:"bytes,1,rep,name=info,proto3" json:"info"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *ListContentResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ListContentResponse) Reset() { *m = ListContentResponse{} } func (*ListContentResponse) ProtoMessage() {} -func (*ListContentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{6} -} -func (m *ListContentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListContentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListContentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *ListContentResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *ListContentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListContentResponse.Merge(m, src) -} -func (m *ListContentResponse) XXX_Size() int { - return m.Size() -} -func (m *ListContentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListContentResponse.DiscardUnknown(m) + +// Deprecated: Use ListContentResponse.ProtoReflect.Descriptor instead. +func (*ListContentResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{6} } -var xxx_messageInfo_ListContentResponse proto.InternalMessageInfo +func (x *ListContentResponse) GetInfo() []*Info { + if x != nil { + return x.Info + } + return nil +} type DeleteContentRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Digest specifies which content to delete. 
- Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"` } -func (m *DeleteContentRequest) Reset() { *m = DeleteContentRequest{} } -func (*DeleteContentRequest) ProtoMessage() {} -func (*DeleteContentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{7} -} -func (m *DeleteContentRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteContentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteContentRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *DeleteContentRequest) Reset() { + *x = DeleteContentRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *DeleteContentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteContentRequest.Merge(m, src) + +func (x *DeleteContentRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DeleteContentRequest) XXX_Size() int { - return m.Size() + +func (*DeleteContentRequest) ProtoMessage() {} + +func (x *DeleteContentRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *DeleteContentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteContentRequest.DiscardUnknown(m) + +// Deprecated: Use DeleteContentRequest.ProtoReflect.Descriptor instead. +func (*DeleteContentRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{7} } -var xxx_messageInfo_DeleteContentRequest proto.InternalMessageInfo +func (x *DeleteContentRequest) GetDigest() string { + if x != nil { + return x.Digest + } + return "" +} // ReadContentRequest defines the fields that make up a request to read a portion of // data from a stored object. type ReadContentRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Digest is the hash identity to read. - Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` + Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"` // Offset specifies the number of bytes from the start at which to begin // the read. If zero or less, the read will be from the start. This uses // standard zero-indexed semantics. Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"` // size is the total size of the read. If zero, the entire blob will be // returned by the service. 
- Size_ int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Size int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` +} + +func (x *ReadContentRequest) Reset() { + *x = ReadContentRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadContentRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ReadContentRequest) Reset() { *m = ReadContentRequest{} } func (*ReadContentRequest) ProtoMessage() {} -func (*ReadContentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{8} -} -func (m *ReadContentRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReadContentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReadContentRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *ReadContentRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *ReadContentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadContentRequest.Merge(m, src) + +// Deprecated: Use ReadContentRequest.ProtoReflect.Descriptor instead. +func (*ReadContentRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{8} } -func (m *ReadContentRequest) XXX_Size() int { - return m.Size() + +func (x *ReadContentRequest) GetDigest() string { + if x != nil { + return x.Digest + } + return "" } -func (m *ReadContentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ReadContentRequest.DiscardUnknown(m) + +func (x *ReadContentRequest) GetOffset() int64 { + if x != nil { + return x.Offset + } + return 0 } -var xxx_messageInfo_ReadContentRequest proto.InternalMessageInfo +func (x *ReadContentRequest) GetSize() int64 { + if x != nil { + return x.Size + } + return 0 +} // ReadContentResponse carries byte data for a read request. 
type ReadContentResponse struct { - Offset int64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Offset int64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"` // offset of the returned data + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` // actual data +} + +func (x *ReadContentResponse) Reset() { + *x = ReadContentResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReadContentResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ReadContentResponse) Reset() { *m = ReadContentResponse{} } func (*ReadContentResponse) ProtoMessage() {} -func (*ReadContentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{9} -} -func (m *ReadContentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReadContentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReadContentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *ReadContentResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *ReadContentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadContentResponse.Merge(m, src) -} -func (m *ReadContentResponse) XXX_Size() int { - return m.Size() + +// Deprecated: Use ReadContentResponse.ProtoReflect.Descriptor instead. 
+func (*ReadContentResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{9} } -func (m *ReadContentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ReadContentResponse.DiscardUnknown(m) + +func (x *ReadContentResponse) GetOffset() int64 { + if x != nil { + return x.Offset + } + return 0 } -var xxx_messageInfo_ReadContentResponse proto.InternalMessageInfo +func (x *ReadContentResponse) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} type Status struct { - StartedAt time.Time `protobuf:"bytes,1,opt,name=started_at,json=startedAt,proto3,stdtime" json:"started_at"` - UpdatedAt time.Time `protobuf:"bytes,2,opt,name=updated_at,json=updatedAt,proto3,stdtime" json:"updated_at"` - Ref string `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"` - Offset int64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"` - Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"` - Expected github_com_opencontainers_go_digest.Digest `protobuf:"bytes,6,opt,name=expected,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"expected"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Status) Reset() { *m = Status{} } + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + StartedAt *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` + UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` + Ref string `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"` + Offset int64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"` + Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"` + Expected string `protobuf:"bytes,6,opt,name=expected,proto3" json:"expected,omitempty"` +} + +func (x *Status) Reset() { + *x = Status{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{10} -} -func (m *Status) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Status.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status.ProtoReflect.Descriptor instead. 
+func (*Status) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{10} +} + +func (x *Status) GetStartedAt() *timestamppb.Timestamp { + if x != nil { + return x.StartedAt } + return nil +} + +func (x *Status) GetUpdatedAt() *timestamppb.Timestamp { + if x != nil { + return x.UpdatedAt + } + return nil } -func (m *Status) XXX_Merge(src proto.Message) { - xxx_messageInfo_Status.Merge(m, src) + +func (x *Status) GetRef() string { + if x != nil { + return x.Ref + } + return "" } -func (m *Status) XXX_Size() int { - return m.Size() + +func (x *Status) GetOffset() int64 { + if x != nil { + return x.Offset + } + return 0 } -func (m *Status) XXX_DiscardUnknown() { - xxx_messageInfo_Status.DiscardUnknown(m) + +func (x *Status) GetTotal() int64 { + if x != nil { + return x.Total + } + return 0 } -var xxx_messageInfo_Status proto.InternalMessageInfo +func (x *Status) GetExpected() string { + if x != nil { + return x.Expected + } + return "" +} type StatusRequest struct { - Ref string `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ref string `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` +} + +func (x *StatusRequest) Reset() { + *x = StatusRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *StatusRequest) Reset() { *m = StatusRequest{} } func (*StatusRequest) ProtoMessage() {} -func (*StatusRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{11} -} -func (m *StatusRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StatusRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *StatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *StatusRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatusRequest.Merge(m, src) + +// Deprecated: Use StatusRequest.ProtoReflect.Descriptor instead. 
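
For reviewers of this regeneration: a minimal, self-contained sketch (not part of the vendored file) of how calling code consumes the regenerated Status message, where timestamps are now *timestamppb.Timestamp rather than time.Time and digest-typed fields are plain strings. The import alias `content`, the helper name `describeStatus`, and the sample values are assumptions for illustration only.

package main

import (
	"fmt"
	"time"

	content "github.com/containerd/containerd/api/services/content/v1"
	"github.com/opencontainers/go-digest"
	"google.golang.org/protobuf/types/known/timestamppb"
)

// describeStatus is a hypothetical helper showing the conversions callers now
// perform: *timestamppb.Timestamp -> time.Time via AsTime(), and the Expected
// field (a plain string in the regenerated code) -> digest.Digest by conversion.
func describeStatus(st *content.Status) string {
	started := st.GetStartedAt().AsTime() // nil-safe; yields the zero Unix time if unset
	expected := digest.Digest(st.GetExpected())
	return fmt.Sprintf("ref=%s started=%s expected=%s offset=%d/%d",
		st.GetRef(), started.Format(time.RFC3339), expected, st.GetOffset(), st.GetTotal())
}

func main() {
	st := &content.Status{
		Ref:       "example-ingest-ref",
		StartedAt: timestamppb.Now(),
		Offset:    512,
		Total:     1024,
		Expected:  digest.FromString("example").String(),
	}
	fmt.Println(describeStatus(st))
}
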
+func (*StatusRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{11} } -func (m *StatusRequest) XXX_Size() int { - return m.Size() + +func (x *StatusRequest) GetRef() string { + if x != nil { + return x.Ref + } + return "" } -func (m *StatusRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StatusRequest.DiscardUnknown(m) + +type StatusResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status *Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` } -var xxx_messageInfo_StatusRequest proto.InternalMessageInfo +func (x *StatusResponse) Reset() { + *x = StatusResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type StatusResponse struct { - Status *Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *StatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *StatusResponse) Reset() { *m = StatusResponse{} } func (*StatusResponse) ProtoMessage() {} -func (*StatusResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{12} -} -func (m *StatusResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *StatusResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *StatusResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatusResponse.Merge(m, src) + +// Deprecated: Use StatusResponse.ProtoReflect.Descriptor instead. 
+func (*StatusResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{12} } -func (m *StatusResponse) XXX_Size() int { - return m.Size() + +func (x *StatusResponse) GetStatus() *Status { + if x != nil { + return x.Status + } + return nil } -func (m *StatusResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StatusResponse.DiscardUnknown(m) + +type ListStatusesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` } -var xxx_messageInfo_StatusResponse proto.InternalMessageInfo +func (x *ListStatusesRequest) Reset() { + *x = ListStatusesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type ListStatusesRequest struct { - Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *ListStatusesRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ListStatusesRequest) Reset() { *m = ListStatusesRequest{} } func (*ListStatusesRequest) ProtoMessage() {} -func (*ListStatusesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{13} -} -func (m *ListStatusesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListStatusesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListStatusesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *ListStatusesRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *ListStatusesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListStatusesRequest.Merge(m, src) + +// Deprecated: Use ListStatusesRequest.ProtoReflect.Descriptor instead. 
+func (*ListStatusesRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{13} } -func (m *ListStatusesRequest) XXX_Size() int { - return m.Size() + +func (x *ListStatusesRequest) GetFilters() []string { + if x != nil { + return x.Filters + } + return nil } -func (m *ListStatusesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListStatusesRequest.DiscardUnknown(m) + +type ListStatusesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Statuses []*Status `protobuf:"bytes,1,rep,name=statuses,proto3" json:"statuses,omitempty"` } -var xxx_messageInfo_ListStatusesRequest proto.InternalMessageInfo +func (x *ListStatusesResponse) Reset() { + *x = ListStatusesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type ListStatusesResponse struct { - Statuses []Status `protobuf:"bytes,1,rep,name=statuses,proto3" json:"statuses"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *ListStatusesResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ListStatusesResponse) Reset() { *m = ListStatusesResponse{} } func (*ListStatusesResponse) ProtoMessage() {} -func (*ListStatusesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{14} -} -func (m *ListStatusesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListStatusesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListStatusesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *ListStatusesResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *ListStatusesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListStatusesResponse.Merge(m, src) -} -func (m *ListStatusesResponse) XXX_Size() int { - return m.Size() -} -func (m *ListStatusesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListStatusesResponse.DiscardUnknown(m) + return mi.MessageOf(x) } -var xxx_messageInfo_ListStatusesResponse proto.InternalMessageInfo +// Deprecated: Use ListStatusesResponse.ProtoReflect.Descriptor instead. +func (*ListStatusesResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{14} +} + +func (x *ListStatusesResponse) GetStatuses() []*Status { + if x != nil { + return x.Statuses + } + return nil +} // WriteContentRequest writes data to the request ref at offset. type WriteContentRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Action sets the behavior of the write. 
// // When this is a write and the ref is not yet allocated, the ref will be @@ -744,7 +984,7 @@ type WriteContentRequest struct { // Only the latest version will be used to check the content against the // digest. It is only required to include it on a single message, before or // with the commit action message. - Expected github_com_opencontainers_go_digest.Digest `protobuf:"bytes,4,opt,name=expected,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"expected"` + Expected string `protobuf:"bytes,4,opt,name=expected,proto3" json:"expected,omitempty"` // Offset specifies the number of bytes from the start at which to begin // the write. For most implementations, this means from the start of the // file. This uses standard, zero-indexed semantics. @@ -763,4663 +1003,786 @@ type WriteContentRequest struct { // Labels are arbitrary data on snapshots. // // The combined size of a key/value pair cannot exceed 4096 bytes. - Labels map[string]string `protobuf:"bytes,7,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Labels map[string]string `protobuf:"bytes,7,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *WriteContentRequest) Reset() { *m = WriteContentRequest{} } -func (*WriteContentRequest) ProtoMessage() {} -func (*WriteContentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{15} -} -func (m *WriteContentRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WriteContentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WriteContentRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *WriteContentRequest) Reset() { + *x = WriteContentRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *WriteContentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WriteContentRequest.Merge(m, src) -} -func (m *WriteContentRequest) XXX_Size() int { - return m.Size() -} -func (m *WriteContentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WriteContentRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_WriteContentRequest proto.InternalMessageInfo - -// WriteContentResponse is returned on the culmination of a write call. -type WriteContentResponse struct { - // Action contains the action for the final message of the stream. A writer - // should confirm that they match the intended result. - Action WriteAction `protobuf:"varint,1,opt,name=action,proto3,enum=containerd.services.content.v1.WriteAction" json:"action,omitempty"` - // StartedAt provides the time at which the write began. - // - // This must be set for stat and commit write actions. All other write - // actions may omit this. - StartedAt time.Time `protobuf:"bytes,2,opt,name=started_at,json=startedAt,proto3,stdtime" json:"started_at"` - // UpdatedAt provides the last time of a successful write. - // - // This must be set for stat and commit write actions. All other write - // actions may omit this. 
- UpdatedAt time.Time `protobuf:"bytes,3,opt,name=updated_at,json=updatedAt,proto3,stdtime" json:"updated_at"` - // Offset is the current committed size for the write. - Offset int64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"` - // Total provides the current, expected total size of the write. - // - // We include this to provide consistency with the Status structure on the - // client writer. - // - // This is only valid on the Stat and Commit response. - Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"` - // Digest, if present, includes the digest up to the currently committed - // bytes. If action is commit, this field will be set. It is implementation - // defined if this is set for other actions. - Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,6,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} -func (m *WriteContentResponse) Reset() { *m = WriteContentResponse{} } -func (*WriteContentResponse) ProtoMessage() {} -func (*WriteContentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{16} -} -func (m *WriteContentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WriteContentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WriteContentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WriteContentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_WriteContentResponse.Merge(m, src) -} -func (m *WriteContentResponse) XXX_Size() int { - return m.Size() -} -func (m *WriteContentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_WriteContentResponse.DiscardUnknown(m) +func (x *WriteContentRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_WriteContentResponse proto.InternalMessageInfo - -type AbortRequest struct { - Ref string `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} +func (*WriteContentRequest) ProtoMessage() {} -func (m *AbortRequest) Reset() { *m = AbortRequest{} } -func (*AbortRequest) ProtoMessage() {} -func (*AbortRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{17} -} -func (m *AbortRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AbortRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AbortRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (x *WriteContentRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil - } -} -func (m *AbortRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AbortRequest.Merge(m, src) -} -func (m *AbortRequest) XXX_Size() int { - return m.Size() -} -func (m *AbortRequest) XXX_DiscardUnknown() { - 
xxx_messageInfo_AbortRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AbortRequest proto.InternalMessageInfo - -func init() { - proto.RegisterEnum("containerd.services.content.v1.WriteAction", WriteAction_name, WriteAction_value) - proto.RegisterType((*Info)(nil), "containerd.services.content.v1.Info") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.content.v1.Info.LabelsEntry") - proto.RegisterType((*InfoRequest)(nil), "containerd.services.content.v1.InfoRequest") - proto.RegisterType((*InfoResponse)(nil), "containerd.services.content.v1.InfoResponse") - proto.RegisterType((*UpdateRequest)(nil), "containerd.services.content.v1.UpdateRequest") - proto.RegisterType((*UpdateResponse)(nil), "containerd.services.content.v1.UpdateResponse") - proto.RegisterType((*ListContentRequest)(nil), "containerd.services.content.v1.ListContentRequest") - proto.RegisterType((*ListContentResponse)(nil), "containerd.services.content.v1.ListContentResponse") - proto.RegisterType((*DeleteContentRequest)(nil), "containerd.services.content.v1.DeleteContentRequest") - proto.RegisterType((*ReadContentRequest)(nil), "containerd.services.content.v1.ReadContentRequest") - proto.RegisterType((*ReadContentResponse)(nil), "containerd.services.content.v1.ReadContentResponse") - proto.RegisterType((*Status)(nil), "containerd.services.content.v1.Status") - proto.RegisterType((*StatusRequest)(nil), "containerd.services.content.v1.StatusRequest") - proto.RegisterType((*StatusResponse)(nil), "containerd.services.content.v1.StatusResponse") - proto.RegisterType((*ListStatusesRequest)(nil), "containerd.services.content.v1.ListStatusesRequest") - proto.RegisterType((*ListStatusesResponse)(nil), "containerd.services.content.v1.ListStatusesResponse") - proto.RegisterType((*WriteContentRequest)(nil), "containerd.services.content.v1.WriteContentRequest") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.content.v1.WriteContentRequest.LabelsEntry") - proto.RegisterType((*WriteContentResponse)(nil), "containerd.services.content.v1.WriteContentResponse") - proto.RegisterType((*AbortRequest)(nil), "containerd.services.content.v1.AbortRequest") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/content/v1/content.proto", fileDescriptor_468430ba3e400391) -} - -var fileDescriptor_468430ba3e400391 = []byte{ - // 1081 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xcd, 0x6f, 0x1b, 0x45, - 0x14, 0xf7, 0x78, 0xed, 0x4d, 0xf2, 0x9c, 0x16, 0x33, 0x31, 0x95, 0xb5, 0x08, 0x67, 0xbb, 0x42, - 0xc8, 0x6a, 0xc9, 0x3a, 0x75, 0x7a, 0x00, 0x2a, 0x01, 0x8e, 0x9b, 0xaa, 0x41, 0x4d, 0x41, 0x5b, - 0x97, 0x40, 0x2f, 0x65, 0x6d, 0x8f, 0xcd, 0x2a, 0xb6, 0xd7, 0xdd, 0x19, 0x5b, 0x84, 0x13, 0x17, - 0x24, 0x14, 0xf5, 0x80, 0xb8, 0xe7, 0x02, 0xfc, 0x15, 0x1c, 0x38, 0xe7, 0xc8, 0x11, 0x71, 0x68, - 0x69, 0xfe, 0x07, 0xee, 0x68, 0x66, 0x67, 0xed, 0xf5, 0x47, 0x58, 0xdb, 0x31, 0x27, 0xbf, 0x99, - 0x7d, 0xbf, 0xf7, 0xfd, 0x31, 0x86, 0x7b, 0x4d, 0x87, 0x7d, 0xdd, 0xab, 0x9a, 0x35, 0xb7, 0x5d, - 0xa8, 0xb9, 0x1d, 0x66, 0x3b, 0x1d, 0xe2, 0xd5, 0xc3, 0xa4, 0xdd, 0x75, 0x0a, 0x94, 0x78, 0x7d, - 0xa7, 0x46, 0xa8, 0xb8, 0x27, 0x1d, 0x56, 0xe8, 0xdf, 0x0a, 0x48, 0xb3, 0xeb, 0xb9, 0xcc, 0xc5, - 0xb9, 0x21, 0xc2, 0x0c, 0xb8, 0xcd, 0x80, 0xa5, 0x7f, 0x4b, 0xcb, 0x34, 0xdd, 0xa6, 0x2b, 0x58, - 0x0b, 0x9c, 0xf2, 0x51, 0x9a, 0xde, 0x74, 0xdd, 0x66, 0x8b, 0x14, 0xc4, 0xa9, 0xda, 0x6b, 0x14, - 0x1a, 0x0e, 0x69, 0xd5, 0x9f, 0xb6, 0x6d, 
0x7a, 0x24, 0x39, 0x36, 0xc7, 0x39, 0x98, 0xd3, 0x26, - 0x94, 0xd9, 0xed, 0xae, 0x64, 0x78, 0x73, 0x9c, 0x81, 0xb4, 0xbb, 0xec, 0xd8, 0xff, 0x68, 0xfc, - 0x13, 0x87, 0xc4, 0x7e, 0xa7, 0xe1, 0xe2, 0x4f, 0x40, 0xad, 0x3b, 0x4d, 0x42, 0x59, 0x16, 0xe9, - 0x28, 0xbf, 0xb6, 0x5b, 0x3c, 0x7b, 0xb1, 0x19, 0xfb, 0xeb, 0xc5, 0xe6, 0x8d, 0x90, 0xfb, 0x6e, - 0x97, 0x74, 0x06, 0x5e, 0xd0, 0x42, 0xd3, 0xdd, 0xf2, 0x21, 0xe6, 0x5d, 0xf1, 0x63, 0x49, 0x09, - 0x18, 0x43, 0x82, 0x3a, 0xdf, 0x92, 0x6c, 0x5c, 0x47, 0x79, 0xc5, 0x12, 0x34, 0x2e, 0x03, 0xd4, - 0x3c, 0x62, 0x33, 0x52, 0x7f, 0x6a, 0xb3, 0xac, 0xa2, 0xa3, 0x7c, 0xaa, 0xa8, 0x99, 0xbe, 0x69, - 0x66, 0x60, 0x9a, 0x59, 0x09, 0x6c, 0xdf, 0x5d, 0xe5, 0xfa, 0x7f, 0x7c, 0xb9, 0x89, 0xac, 0x35, - 0x89, 0x2b, 0x31, 0x2e, 0xa4, 0xd7, 0xad, 0x07, 0x42, 0x12, 0xf3, 0x08, 0x91, 0xb8, 0x12, 0xc3, - 0xf7, 0x41, 0x6d, 0xd9, 0x55, 0xd2, 0xa2, 0xd9, 0xa4, 0xae, 0xe4, 0x53, 0xc5, 0x6d, 0xf3, 0xbf, - 0x33, 0x63, 0xf2, 0xf8, 0x98, 0x0f, 0x04, 0x64, 0xaf, 0xc3, 0xbc, 0x63, 0x4b, 0xe2, 0xb5, 0xf7, - 0x21, 0x15, 0xba, 0xc6, 0x69, 0x50, 0x8e, 0xc8, 0xb1, 0x1f, 0x3f, 0x8b, 0x93, 0x38, 0x03, 0xc9, - 0xbe, 0xdd, 0xea, 0xf9, 0x91, 0x58, 0xb3, 0xfc, 0xc3, 0x07, 0xf1, 0xf7, 0x90, 0xf1, 0x25, 0xa4, - 0xb8, 0x58, 0x8b, 0x3c, 0xeb, 0xf1, 0x88, 0x2d, 0x31, 0xfa, 0xc6, 0x43, 0x58, 0xf7, 0x45, 0xd3, - 0xae, 0xdb, 0xa1, 0x04, 0x7f, 0x08, 0x09, 0xa7, 0xd3, 0x70, 0x85, 0xe4, 0x54, 0xf1, 0xed, 0x59, - 0xbc, 0xdd, 0x4d, 0x70, 0xfd, 0x96, 0xc0, 0x19, 0xcf, 0x11, 0x5c, 0x79, 0x2c, 0xa2, 0x17, 0x58, - 0x7b, 0x49, 0x89, 0xf8, 0x0e, 0xa4, 0xfc, 0x74, 0x88, 0x3a, 0x16, 0xc1, 0x99, 0x96, 0xc7, 0x7b, - 0xbc, 0xd4, 0x0f, 0x6c, 0x7a, 0x64, 0xc9, 0xac, 0x73, 0xda, 0xf8, 0x0c, 0xae, 0x06, 0xd6, 0x2c, - 0xc9, 0x41, 0x13, 0xf0, 0x03, 0x87, 0xb2, 0xb2, 0xcf, 0x12, 0x38, 0x99, 0x85, 0x95, 0x86, 0xd3, - 0x62, 0xc4, 0xa3, 0x59, 0xa4, 0x2b, 0xf9, 0x35, 0x2b, 0x38, 0x1a, 0x8f, 0x61, 0x63, 0x84, 0x7f, - 0xc2, 0x0c, 0x65, 0x21, 0x33, 0xaa, 0x90, 0xb9, 0x4b, 0x5a, 0x84, 0x91, 0x31, 0x43, 0x96, 0x59, - 0x1b, 0xcf, 0x11, 0x60, 0x8b, 0xd8, 0xf5, 0xff, 0x4f, 0x05, 0xbe, 0x06, 0xaa, 0xdb, 0x68, 0x50, - 0xc2, 0x64, 0xfb, 0xcb, 0xd3, 0x60, 0x28, 0x28, 0xc3, 0xa1, 0x60, 0x94, 0x60, 0x63, 0xc4, 0x1a, - 0x19, 0xc9, 0xa1, 0x08, 0x34, 0x2e, 0xa2, 0x6e, 0x33, 0x5b, 0x08, 0x5e, 0xb7, 0x04, 0x6d, 0xfc, - 0x1c, 0x07, 0xf5, 0x11, 0xb3, 0x59, 0x8f, 0xf2, 0xe9, 0x40, 0x99, 0xed, 0xc9, 0xe9, 0x80, 0xe6, - 0x99, 0x0e, 0x12, 0x37, 0x31, 0x62, 0xe2, 0x8b, 0x8d, 0x98, 0x34, 0x28, 0x1e, 0x69, 0x08, 0x57, - 0xd7, 0x2c, 0x4e, 0x86, 0x5c, 0x4a, 0x8c, 0xb8, 0x94, 0x81, 0x24, 0x73, 0x99, 0xdd, 0xca, 0x26, - 0xc5, 0xb5, 0x7f, 0xc0, 0x0f, 0x61, 0x95, 0x7c, 0xd3, 0x25, 0x35, 0x46, 0xea, 0x59, 0x75, 0xe1, - 0x8c, 0x0c, 0x64, 0x18, 0xd7, 0xe1, 0x8a, 0x1f, 0xa3, 0x20, 0xe1, 0xd2, 0x40, 0x34, 0x30, 0x90, - 0xb7, 0x55, 0xc0, 0x32, 0xa8, 0x67, 0x95, 0x8a, 0x1b, 0x19, 0xca, 0x77, 0xa2, 0x2a, 0x5a, 0xe2, - 0x25, 0xca, 0x28, 0xf8, 0x6d, 0xe2, 0xdf, 0x12, 0x1a, 0xdd, 0x57, 0x5f, 0x41, 0x66, 0x14, 0x20, - 0x0d, 0xb9, 0x0f, 0xab, 0x54, 0xde, 0xc9, 0xe6, 0x9a, 0xd1, 0x14, 0xd9, 0x5e, 0x03, 0xb4, 0xf1, - 0x93, 0x02, 0x1b, 0x87, 0x9e, 0x33, 0xd1, 0x62, 0x65, 0x50, 0xed, 0x1a, 0x73, 0xdc, 0x8e, 0x70, - 0xf5, 0x6a, 0xf1, 0x66, 0x94, 0x7c, 0x21, 0xa4, 0x24, 0x20, 0x96, 0x84, 0x06, 0x31, 0x8d, 0x0f, - 0x93, 0x3e, 0x48, 0xae, 0x72, 0x51, 0x72, 0x13, 0x97, 0x4f, 0x6e, 0xa8, 0xb4, 0x92, 0x53, 0xbb, - 0x45, 0x1d, 0x76, 0x0b, 0x3e, 0x1c, 0xec, 0xbe, 0x15, 0x11, 0xc8, 0x8f, 0x66, 0x72, 0x74, 0x34, - 0x5a, 0xcb, 0x5e, 0x85, 0x2f, 0xe3, 0x90, 0x19, 0x55, 0x23, 0xf3, 
0xbe, 0x94, 0xac, 0x8c, 0x0e, - 0x85, 0xf8, 0x32, 0x86, 0x82, 0xb2, 0xd8, 0x50, 0x98, 0x6f, 0x04, 0x0c, 0x47, 0xb2, 0x7a, 0xe9, - 0xa9, 0xaf, 0xc3, 0x7a, 0xa9, 0xea, 0x7a, 0xec, 0xc2, 0xee, 0xbf, 0xf1, 0x3d, 0x82, 0x54, 0x28, - 0x7a, 0xf8, 0x2d, 0x48, 0x3c, 0xaa, 0x94, 0x2a, 0xe9, 0x98, 0xb6, 0x71, 0x72, 0xaa, 0xbf, 0x16, - 0xfa, 0xc4, 0x3b, 0x0b, 0x6f, 0x42, 0xf2, 0xd0, 0xda, 0xaf, 0xec, 0xa5, 0x91, 0x96, 0x39, 0x39, - 0xd5, 0xd3, 0xa1, 0xef, 0x82, 0xc4, 0xd7, 0x41, 0x2d, 0x7f, 0x7a, 0x70, 0xb0, 0x5f, 0x49, 0xc7, - 0xb5, 0x37, 0x4e, 0x4e, 0xf5, 0xd7, 0x43, 0x1c, 0x65, 0xb7, 0xdd, 0x76, 0x98, 0xb6, 0xf1, 0xc3, - 0x2f, 0xb9, 0xd8, 0x6f, 0xbf, 0xe6, 0xc2, 0x7a, 0x8b, 0xbf, 0xaf, 0xc0, 0x8a, 0x2c, 0x03, 0x6c, - 0xcb, 0x97, 0xe9, 0xcd, 0x59, 0x36, 0xa9, 0x74, 0x4d, 0x7b, 0x77, 0x36, 0x66, 0x59, 0x61, 0x4d, - 0x50, 0xfd, 0xb7, 0x04, 0xde, 0x8a, 0xc2, 0x8d, 0xbc, 0x80, 0x34, 0x73, 0x56, 0x76, 0xa9, 0xe8, - 0x19, 0x24, 0xf8, 0x68, 0xc3, 0xc5, 0x28, 0xdc, 0xe4, 0x43, 0x44, 0xdb, 0x99, 0x0b, 0xe3, 0x2b, - 0xdc, 0x46, 0xf8, 0x73, 0x50, 0xfd, 0xe7, 0x04, 0xbe, 0x1d, 0x25, 0x60, 0xda, 0xb3, 0x43, 0xbb, - 0x36, 0x51, 0xdf, 0x7b, 0xfc, 0x7f, 0x03, 0x77, 0x85, 0xef, 0xec, 0x68, 0x57, 0x26, 0xdf, 0x19, - 0xd1, 0xae, 0x4c, 0x79, 0x0d, 0x6c, 0x23, 0x9e, 0x26, 0xb9, 0xe2, 0xb7, 0x66, 0xdc, 0x41, 0xb3, - 0xa6, 0x69, 0x6c, 0xe5, 0x1d, 0xc3, 0x7a, 0x78, 0x03, 0xe1, 0x99, 0x42, 0x3f, 0xb6, 0xe0, 0xb4, - 0xdb, 0xf3, 0x81, 0xa4, 0xea, 0x3e, 0x24, 0xfd, 0xd6, 0xd9, 0x59, 0x60, 0x24, 0x47, 0xeb, 0x9c, - 0x36, 0x60, 0xf3, 0x68, 0x1b, 0xe1, 0x03, 0x48, 0x8a, 0xd9, 0x80, 0x23, 0x3b, 0x27, 0x3c, 0x42, - 0x2e, 0xaa, 0x8e, 0xdd, 0x27, 0x67, 0xaf, 0x72, 0xb1, 0x3f, 0x5f, 0xe5, 0x62, 0xdf, 0x9d, 0xe7, - 0xd0, 0xd9, 0x79, 0x0e, 0xfd, 0x71, 0x9e, 0x43, 0x7f, 0x9f, 0xe7, 0xd0, 0x93, 0x8f, 0x17, 0xfd, - 0x1f, 0x7d, 0x47, 0x92, 0x5f, 0xc4, 0xaa, 0xaa, 0xd0, 0xb6, 0xf3, 0x6f, 0x00, 0x00, 0x00, 0xff, - 0xff, 0xc0, 0xc2, 0x35, 0xb1, 0x94, 0x0f, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// ContentClient is the client API for Content service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ContentClient interface { - // Info returns information about a committed object. - // - // This call can be used for getting the size of content and checking for - // existence. - Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) - // Update updates content metadata. - // - // This call can be used to manage the mutable content labels. The - // immutable metadata such as digest, size, and committed at cannot - // be updated. - Update(ctx context.Context, in *UpdateRequest, opts ...grpc.CallOption) (*UpdateResponse, error) - // List streams the entire set of content as Info objects and closes the - // stream. - // - // Typically, this will yield a large response, chunked into messages. - // Clients should make provisions to ensure they can handle the entire data - // set. - List(ctx context.Context, in *ListContentRequest, opts ...grpc.CallOption) (Content_ListClient, error) - // Delete will delete the referenced object. 
- Delete(ctx context.Context, in *DeleteContentRequest, opts ...grpc.CallOption) (*types.Empty, error) - // Read allows one to read an object based on the offset into the content. - // - // The requested data may be returned in one or more messages. - Read(ctx context.Context, in *ReadContentRequest, opts ...grpc.CallOption) (Content_ReadClient, error) - // Status returns the status for a single reference. - Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) - // ListStatuses returns the status of ongoing object ingestions, started via - // Write. - // - // Only those matching the regular expression will be provided in the - // response. If the provided regular expression is empty, all ingestions - // will be provided. - ListStatuses(ctx context.Context, in *ListStatusesRequest, opts ...grpc.CallOption) (*ListStatusesResponse, error) - // Write begins or resumes writes to a resource identified by a unique ref. - // Only one active stream may exist at a time for each ref. - // - // Once a write stream has started, it may only write to a single ref, thus - // once a stream is started, the ref may be omitted on subsequent writes. - // - // For any write transaction represented by a ref, only a single write may - // be made to a given offset. If overlapping writes occur, it is an error. - // Writes should be sequential and implementations may throw an error if - // this is required. - // - // If expected_digest is set and already part of the content store, the - // write will fail. - // - // When completed, the commit flag should be set to true. If expected size - // or digest is set, the content will be validated against those values. - Write(ctx context.Context, opts ...grpc.CallOption) (Content_WriteClient, error) - // Abort cancels the ongoing write named in the request. Any resources - // associated with the write will be collected. - Abort(ctx context.Context, in *AbortRequest, opts ...grpc.CallOption) (*types.Empty, error) -} - -type contentClient struct { - cc *grpc.ClientConn -} - -func NewContentClient(cc *grpc.ClientConn) ContentClient { - return &contentClient{cc} -} - -func (c *contentClient) Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) { - out := new(InfoResponse) - err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Info", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *contentClient) Update(ctx context.Context, in *UpdateRequest, opts ...grpc.CallOption) (*UpdateResponse, error) { - out := new(UpdateResponse) - err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Update", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *contentClient) List(ctx context.Context, in *ListContentRequest, opts ...grpc.CallOption) (Content_ListClient, error) { - stream, err := c.cc.NewStream(ctx, &_Content_serviceDesc.Streams[0], "/containerd.services.content.v1.Content/List", opts...) 
- if err != nil { - return nil, err + return ms } - x := &contentListClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Content_ListClient interface { - Recv() (*ListContentResponse, error) - grpc.ClientStream + return mi.MessageOf(x) } -type contentListClient struct { - grpc.ClientStream -} - -func (x *contentListClient) Recv() (*ListContentResponse, error) { - m := new(ListContentResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil +// Deprecated: Use WriteContentRequest.ProtoReflect.Descriptor instead. +func (*WriteContentRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{15} } -func (c *contentClient) Delete(ctx context.Context, in *DeleteContentRequest, opts ...grpc.CallOption) (*types.Empty, error) { - out := new(types.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Delete", in, out, opts...) - if err != nil { - return nil, err +func (x *WriteContentRequest) GetAction() WriteAction { + if x != nil { + return x.Action } - return out, nil + return WriteAction_STAT } -func (c *contentClient) Read(ctx context.Context, in *ReadContentRequest, opts ...grpc.CallOption) (Content_ReadClient, error) { - stream, err := c.cc.NewStream(ctx, &_Content_serviceDesc.Streams[1], "/containerd.services.content.v1.Content/Read", opts...) - if err != nil { - return nil, err +func (x *WriteContentRequest) GetRef() string { + if x != nil { + return x.Ref } - x := &contentReadClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Content_ReadClient interface { - Recv() (*ReadContentResponse, error) - grpc.ClientStream + return "" } -type contentReadClient struct { - grpc.ClientStream -} - -func (x *contentReadClient) Recv() (*ReadContentResponse, error) { - m := new(ReadContentResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err +func (x *WriteContentRequest) GetTotal() int64 { + if x != nil { + return x.Total } - return m, nil + return 0 } -func (c *contentClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { - out := new(StatusResponse) - err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Status", in, out, opts...) - if err != nil { - return nil, err +func (x *WriteContentRequest) GetExpected() string { + if x != nil { + return x.Expected } - return out, nil + return "" } -func (c *contentClient) ListStatuses(ctx context.Context, in *ListStatusesRequest, opts ...grpc.CallOption) (*ListStatusesResponse, error) { - out := new(ListStatusesResponse) - err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/ListStatuses", in, out, opts...) - if err != nil { - return nil, err +func (x *WriteContentRequest) GetOffset() int64 { + if x != nil { + return x.Offset } - return out, nil + return 0 } -func (c *contentClient) Write(ctx context.Context, opts ...grpc.CallOption) (Content_WriteClient, error) { - stream, err := c.cc.NewStream(ctx, &_Content_serviceDesc.Streams[2], "/containerd.services.content.v1.Content/Write", opts...) 
- if err != nil { - return nil, err +func (x *WriteContentRequest) GetData() []byte { + if x != nil { + return x.Data } - x := &contentWriteClient{stream} - return x, nil -} - -type Content_WriteClient interface { - Send(*WriteContentRequest) error - Recv() (*WriteContentResponse, error) - grpc.ClientStream -} - -type contentWriteClient struct { - grpc.ClientStream -} - -func (x *contentWriteClient) Send(m *WriteContentRequest) error { - return x.ClientStream.SendMsg(m) + return nil } -func (x *contentWriteClient) Recv() (*WriteContentResponse, error) { - m := new(WriteContentResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err +func (x *WriteContentRequest) GetLabels() map[string]string { + if x != nil { + return x.Labels } - return m, nil + return nil } -func (c *contentClient) Abort(ctx context.Context, in *AbortRequest, opts ...grpc.CallOption) (*types.Empty, error) { - out := new(types.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Abort", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} +// WriteContentResponse is returned on the culmination of a write call. +type WriteContentResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -// ContentServer is the server API for Content service. -type ContentServer interface { - // Info returns information about a committed object. - // - // This call can be used for getting the size of content and checking for - // existence. - Info(context.Context, *InfoRequest) (*InfoResponse, error) - // Update updates content metadata. - // - // This call can be used to manage the mutable content labels. The - // immutable metadata such as digest, size, and committed at cannot - // be updated. - Update(context.Context, *UpdateRequest) (*UpdateResponse, error) - // List streams the entire set of content as Info objects and closes the - // stream. - // - // Typically, this will yield a large response, chunked into messages. - // Clients should make provisions to ensure they can handle the entire data - // set. - List(*ListContentRequest, Content_ListServer) error - // Delete will delete the referenced object. - Delete(context.Context, *DeleteContentRequest) (*types.Empty, error) - // Read allows one to read an object based on the offset into the content. - // - // The requested data may be returned in one or more messages. - Read(*ReadContentRequest, Content_ReadServer) error - // Status returns the status for a single reference. - Status(context.Context, *StatusRequest) (*StatusResponse, error) - // ListStatuses returns the status of ongoing object ingestions, started via - // Write. - // - // Only those matching the regular expression will be provided in the - // response. If the provided regular expression is empty, all ingestions - // will be provided. - ListStatuses(context.Context, *ListStatusesRequest) (*ListStatusesResponse, error) - // Write begins or resumes writes to a resource identified by a unique ref. - // Only one active stream may exist at a time for each ref. + // Action contains the action for the final message of the stream. A writer + // should confirm that they match the intended result. + Action WriteAction `protobuf:"varint,1,opt,name=action,proto3,enum=containerd.services.content.v1.WriteAction" json:"action,omitempty"` + // StartedAt provides the time at which the write began. 
// - // Once a write stream has started, it may only write to a single ref, thus - // once a stream is started, the ref may be omitted on subsequent writes. + // This must be set for stat and commit write actions. All other write + // actions may omit this. + StartedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` + // UpdatedAt provides the last time of a successful write. // - // For any write transaction represented by a ref, only a single write may - // be made to a given offset. If overlapping writes occur, it is an error. - // Writes should be sequential and implementations may throw an error if - // this is required. + // This must be set for stat and commit write actions. All other write + // actions may omit this. + UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` + // Offset is the current committed size for the write. + Offset int64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"` + // Total provides the current, expected total size of the write. // - // If expected_digest is set and already part of the content store, the - // write will fail. + // We include this to provide consistency with the Status structure on the + // client writer. // - // When completed, the commit flag should be set to true. If expected size - // or digest is set, the content will be validated against those values. - Write(Content_WriteServer) error - // Abort cancels the ongoing write named in the request. Any resources - // associated with the write will be collected. - Abort(context.Context, *AbortRequest) (*types.Empty, error) -} - -// UnimplementedContentServer can be embedded to have forward compatible implementations. 
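
The write protocol described in the surrounding comments (stream writes, then a commit validated against an expected size and digest) might look roughly like the client-side sketch below under the regenerated types. It assumes the stream interface keeps the Content_WriteClient name and Send/Recv shape, and that the WriteAction enum exposes WRITE and COMMIT values alongside STAT; the helper name, ref, and payload are illustrative only.

package contentexample

import (
	"fmt"

	content "github.com/containerd/containerd/api/services/content/v1"
	"github.com/opencontainers/go-digest"
)

// commitBlob is a hypothetical helper: given an established Write stream, it
// sends the payload and then commits it against the expected digest, which the
// regenerated message carries as a plain string rather than a digest.Digest.
func commitBlob(stream content.Content_WriteClient, ref string, data []byte) error {
	write := &content.WriteContentRequest{
		Action: content.WriteAction_WRITE,
		Ref:    ref,
		Offset: 0,
		Data:   data,
	}
	if err := stream.Send(write); err != nil {
		return err
	}
	commit := &content.WriteContentRequest{
		Action:   content.WriteAction_COMMIT,
		Ref:      ref,
		Total:    int64(len(data)),
		Expected: digest.FromBytes(data).String(), // string-typed in the regenerated message
	}
	if err := stream.Send(commit); err != nil {
		return err
	}
	resp, err := stream.Recv()
	if err != nil {
		return err
	}
	// Digest on the response is also a plain string now; convert before comparing.
	if digest.Digest(resp.GetDigest()) != digest.FromBytes(data) {
		return fmt.Errorf("unexpected digest %s", resp.GetDigest())
	}
	return nil
}
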
-type UnimplementedContentServer struct { -} - -func (*UnimplementedContentServer) Info(ctx context.Context, req *InfoRequest) (*InfoResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Info not implemented") -} -func (*UnimplementedContentServer) Update(ctx context.Context, req *UpdateRequest) (*UpdateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") -} -func (*UnimplementedContentServer) List(req *ListContentRequest, srv Content_ListServer) error { - return status.Errorf(codes.Unimplemented, "method List not implemented") -} -func (*UnimplementedContentServer) Delete(ctx context.Context, req *DeleteContentRequest) (*types.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") -} -func (*UnimplementedContentServer) Read(req *ReadContentRequest, srv Content_ReadServer) error { - return status.Errorf(codes.Unimplemented, "method Read not implemented") -} -func (*UnimplementedContentServer) Status(ctx context.Context, req *StatusRequest) (*StatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") -} -func (*UnimplementedContentServer) ListStatuses(ctx context.Context, req *ListStatusesRequest) (*ListStatusesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListStatuses not implemented") -} -func (*UnimplementedContentServer) Write(srv Content_WriteServer) error { - return status.Errorf(codes.Unimplemented, "method Write not implemented") -} -func (*UnimplementedContentServer) Abort(ctx context.Context, req *AbortRequest) (*types.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Abort not implemented") -} - -func RegisterContentServer(s *grpc.Server, srv ContentServer) { - s.RegisterService(&_Content_serviceDesc, srv) -} - -func _Content_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(InfoRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ContentServer).Info(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.content.v1.Content/Info", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ContentServer).Info(ctx, req.(*InfoRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Content_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ContentServer).Update(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.content.v1.Content/Update", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ContentServer).Update(ctx, req.(*UpdateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Content_List_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(ListContentRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(ContentServer).List(m, &contentListServer{stream}) -} - -type Content_ListServer interface { - Send(*ListContentResponse) error - grpc.ServerStream -} - -type contentListServer struct { - grpc.ServerStream -} - -func (x *contentListServer) Send(m 
*ListContentResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Content_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteContentRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ContentServer).Delete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.content.v1.Content/Delete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ContentServer).Delete(ctx, req.(*DeleteContentRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Content_Read_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(ReadContentRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(ContentServer).Read(m, &contentReadServer{stream}) -} - -type Content_ReadServer interface { - Send(*ReadContentResponse) error - grpc.ServerStream -} - -type contentReadServer struct { - grpc.ServerStream -} - -func (x *contentReadServer) Send(m *ReadContentResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Content_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StatusRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ContentServer).Status(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.content.v1.Content/Status", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ContentServer).Status(ctx, req.(*StatusRequest)) - } - return interceptor(ctx, in, info, handler) + // This is only valid on the Stat and Commit response. + Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"` + // Digest, if present, includes the digest up to the currently committed + // bytes. If action is commit, this field will be set. It is implementation + // defined if this is set for other actions. 
+ Digest string `protobuf:"bytes,6,opt,name=digest,proto3" json:"digest,omitempty"` } -func _Content_ListStatuses_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListStatusesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ContentServer).ListStatuses(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.content.v1.Content/ListStatuses", +func (x *WriteContentResponse) Reset() { + *x = WriteContentResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ContentServer).ListStatuses(ctx, req.(*ListStatusesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Content_Write_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(ContentServer).Write(&contentWriteServer{stream}) -} - -type Content_WriteServer interface { - Send(*WriteContentResponse) error - Recv() (*WriteContentRequest, error) - grpc.ServerStream -} - -type contentWriteServer struct { - grpc.ServerStream } -func (x *contentWriteServer) Send(m *WriteContentResponse) error { - return x.ServerStream.SendMsg(m) +func (x *WriteContentResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (x *contentWriteServer) Recv() (*WriteContentRequest, error) { - m := new(WriteContentRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +func (*WriteContentResponse) ProtoMessage() {} -func _Content_Abort_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AbortRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ContentServer).Abort(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.content.v1.Content/Abort", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ContentServer).Abort(ctx, req.(*AbortRequest)) +func (x *WriteContentResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return interceptor(ctx, in, info, handler) + return mi.MessageOf(x) } -var _Content_serviceDesc = grpc.ServiceDesc{ - ServiceName: "containerd.services.content.v1.Content", - HandlerType: (*ContentServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Info", - Handler: _Content_Info_Handler, - }, - { - MethodName: "Update", - Handler: _Content_Update_Handler, - }, - { - MethodName: "Delete", - Handler: _Content_Delete_Handler, - }, - { - MethodName: "Status", - Handler: _Content_Status_Handler, - }, - { - MethodName: "ListStatuses", - Handler: _Content_ListStatuses_Handler, - }, - { - MethodName: "Abort", - Handler: _Content_Abort_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "List", - Handler: _Content_List_Handler, - ServerStreams: true, - }, - { - StreamName: "Read", - 
Handler: _Content_Read_Handler, - ServerStreams: true, - }, - { - StreamName: "Write", - Handler: _Content_Write_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "github.com/containerd/containerd/api/services/content/v1/content.proto", +// Deprecated: Use WriteContentResponse.ProtoReflect.Descriptor instead. +func (*WriteContentResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{16} } -func (m *Info) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *WriteContentResponse) GetAction() WriteAction { + if x != nil { + return x.Action } - return dAtA[:n], nil + return WriteAction_STAT } -func (m *Info) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Info) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintContent(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintContent(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintContent(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x2a - } - } - n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt):]) - if err1 != nil { - return 0, err1 - } - i -= n1 - i = encodeVarintContent(dAtA, i, uint64(n1)) - i-- - dAtA[i] = 0x22 - n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt):]) - if err2 != nil { - return 0, err2 - } - i -= n2 - i = encodeVarintContent(dAtA, i, uint64(n2)) - i-- - dAtA[i] = 0x1a - if m.Size_ != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Size_)) - i-- - dAtA[i] = 0x10 - } - if len(m.Digest) > 0 { - i -= len(m.Digest) - copy(dAtA[i:], m.Digest) - i = encodeVarintContent(dAtA, i, uint64(len(m.Digest))) - i-- - dAtA[i] = 0xa +func (x *WriteContentResponse) GetStartedAt() *timestamppb.Timestamp { + if x != nil { + return x.StartedAt } - return len(dAtA) - i, nil + return nil } -func (m *InfoRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *WriteContentResponse) GetUpdatedAt() *timestamppb.Timestamp { + if x != nil { + return x.UpdatedAt } - return dAtA[:n], nil -} - -func (m *InfoRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return nil } -func (m *InfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) +func (x *WriteContentResponse) GetOffset() int64 { + if x != nil { + return x.Offset } - if len(m.Digest) > 0 { - i -= len(m.Digest) - copy(dAtA[i:], m.Digest) - i = encodeVarintContent(dAtA, i, uint64(len(m.Digest))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return 0 } -func (m *InfoResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA 
= make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *WriteContentResponse) GetTotal() int64 { + if x != nil { + return x.Total } - return dAtA[:n], nil -} - -func (m *InfoResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return 0 } -func (m *InfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContent(dAtA, i, uint64(size)) +func (x *WriteContentResponse) GetDigest() string { + if x != nil { + return x.Digest } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return "" } -func (m *UpdateRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} +type AbortRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *UpdateRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + Ref string `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` } -func (m *UpdateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.UpdateMask != nil { - { - size, err := m.UpdateMask.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContent(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContent(dAtA, i, uint64(size)) +func (x *AbortRequest) Reset() { + *x = AbortRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil } -func (m *UpdateResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (x *AbortRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *UpdateResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} +func (*AbortRequest) ProtoMessage() {} -func (m *UpdateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err +func (x *AbortRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - i -= size - i = encodeVarintContent(dAtA, i, uint64(size)) - } - 
i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ListContentRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *ListContentRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListContentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Filters) > 0 { - for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Filters[iNdEx]) - copy(dAtA[i:], m.Filters[iNdEx]) - i = encodeVarintContent(dAtA, i, uint64(len(m.Filters[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil +// Deprecated: Use AbortRequest.ProtoReflect.Descriptor instead. +func (*AbortRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP(), []int{17} +} + +func (x *AbortRequest) GetRef() string { + if x != nil { + return x.Ref + } + return "" +} + +var File_github_com_containerd_containerd_api_services_content_v1_content_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDesc = []byte{ + 0x0a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, + 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, + 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xad, 0x02, 0x0a, 0x04, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x39, 0x0a, + 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x64, 0x41, 0x74, 0x12, 0x48, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, + 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x25, 0x0a, 0x0b, 0x49, 0x6e, 0x66, 0x6f, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, + 0x48, 0x0a, 0x0c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x38, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x86, 0x01, 0x0a, 0x0d, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x69, + 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, + 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, + 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, + 0x73, 0x6b, 0x22, 0x4a, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x2e, + 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x22, 0x4f, + 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 
0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x22, + 0x2e, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, + 0x58, 0x0a, 0x12, 0x52, 0x65, 0x61, 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, + 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, + 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x22, 0x41, 0x0a, 0x13, 0x52, 0x65, 0x61, + 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0xda, 0x01, 0x0a, + 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, + 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x10, 0x0a, + 0x03, 0x72, 0x65, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, 0x12, + 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x1a, 0x0a, + 0x08, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x22, 0x21, 0x0a, 0x0d, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x65, + 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, 0x22, 0x50, 0x0a, 0x0e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, + 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 
0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x2f, + 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x22, + 0x5a, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x22, 0xde, 0x02, 0x0a, 0x13, + 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x43, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, + 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, + 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x66, + 0x66, 0x73, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x57, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x97, 0x02, 0x0a, + 0x14, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x43, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 
0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, + 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, + 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x74, 0x61, + 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x16, + 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x20, 0x0a, 0x0c, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, 0x2a, 0x2e, 0x0a, 0x0b, 0x57, 0x72, 0x69, 0x74, + 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x54, 0x41, 0x54, 0x10, + 0x00, 0x12, 0x09, 0x0a, 0x05, 0x57, 0x52, 0x49, 0x54, 0x45, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, + 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x02, 0x32, 0xbe, 0x07, 0x0a, 0x07, 0x43, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x12, 0x61, 0x0a, 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2b, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, + 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x67, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x12, 0x2d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, + 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, + 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x71, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 
0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x30, 0x01, 0x12, 0x56, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x34, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x71, 0x0a, 0x04, 0x52, + 0x65, 0x61, 0x64, 0x12, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x43, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x67, + 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x79, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x33, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x76, 0x0a, 0x05, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x33, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x72, 0x69, + 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x34, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2e, 0x76, + 0x31, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 
0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4d, 0x0a, 0x05, 0x41, 0x62, + 0x6f, 0x72, 0x74, 0x12, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x42, 0x5a, 0x40, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x2f, 0x76, 0x31, 0x3b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func (m *ListContentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} +var ( + file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescData = file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDesc +) -func (m *ListContentResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_services_content_v1_content_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes = make([]protoimpl.MessageInfo, 20) +var file_github_com_containerd_containerd_api_services_content_v1_content_proto_goTypes = []interface{}{ + (WriteAction)(0), // 0: containerd.services.content.v1.WriteAction + (*Info)(nil), // 1: containerd.services.content.v1.Info + (*InfoRequest)(nil), // 2: containerd.services.content.v1.InfoRequest + (*InfoResponse)(nil), // 3: containerd.services.content.v1.InfoResponse + (*UpdateRequest)(nil), // 4: containerd.services.content.v1.UpdateRequest + (*UpdateResponse)(nil), // 5: containerd.services.content.v1.UpdateResponse + (*ListContentRequest)(nil), // 6: containerd.services.content.v1.ListContentRequest + (*ListContentResponse)(nil), // 7: containerd.services.content.v1.ListContentResponse + (*DeleteContentRequest)(nil), // 8: containerd.services.content.v1.DeleteContentRequest + (*ReadContentRequest)(nil), // 9: containerd.services.content.v1.ReadContentRequest + (*ReadContentResponse)(nil), // 10: containerd.services.content.v1.ReadContentResponse + (*Status)(nil), // 11: containerd.services.content.v1.Status + (*StatusRequest)(nil), // 12: containerd.services.content.v1.StatusRequest + (*StatusResponse)(nil), // 13: 
containerd.services.content.v1.StatusResponse + (*ListStatusesRequest)(nil), // 14: containerd.services.content.v1.ListStatusesRequest + (*ListStatusesResponse)(nil), // 15: containerd.services.content.v1.ListStatusesResponse + (*WriteContentRequest)(nil), // 16: containerd.services.content.v1.WriteContentRequest + (*WriteContentResponse)(nil), // 17: containerd.services.content.v1.WriteContentResponse + (*AbortRequest)(nil), // 18: containerd.services.content.v1.AbortRequest + nil, // 19: containerd.services.content.v1.Info.LabelsEntry + nil, // 20: containerd.services.content.v1.WriteContentRequest.LabelsEntry + (*timestamppb.Timestamp)(nil), // 21: google.protobuf.Timestamp + (*fieldmaskpb.FieldMask)(nil), // 22: google.protobuf.FieldMask + (*emptypb.Empty)(nil), // 23: google.protobuf.Empty +} +var file_github_com_containerd_containerd_api_services_content_v1_content_proto_depIdxs = []int32{ + 21, // 0: containerd.services.content.v1.Info.created_at:type_name -> google.protobuf.Timestamp + 21, // 1: containerd.services.content.v1.Info.updated_at:type_name -> google.protobuf.Timestamp + 19, // 2: containerd.services.content.v1.Info.labels:type_name -> containerd.services.content.v1.Info.LabelsEntry + 1, // 3: containerd.services.content.v1.InfoResponse.info:type_name -> containerd.services.content.v1.Info + 1, // 4: containerd.services.content.v1.UpdateRequest.info:type_name -> containerd.services.content.v1.Info + 22, // 5: containerd.services.content.v1.UpdateRequest.update_mask:type_name -> google.protobuf.FieldMask + 1, // 6: containerd.services.content.v1.UpdateResponse.info:type_name -> containerd.services.content.v1.Info + 1, // 7: containerd.services.content.v1.ListContentResponse.info:type_name -> containerd.services.content.v1.Info + 21, // 8: containerd.services.content.v1.Status.started_at:type_name -> google.protobuf.Timestamp + 21, // 9: containerd.services.content.v1.Status.updated_at:type_name -> google.protobuf.Timestamp + 11, // 10: containerd.services.content.v1.StatusResponse.status:type_name -> containerd.services.content.v1.Status + 11, // 11: containerd.services.content.v1.ListStatusesResponse.statuses:type_name -> containerd.services.content.v1.Status + 0, // 12: containerd.services.content.v1.WriteContentRequest.action:type_name -> containerd.services.content.v1.WriteAction + 20, // 13: containerd.services.content.v1.WriteContentRequest.labels:type_name -> containerd.services.content.v1.WriteContentRequest.LabelsEntry + 0, // 14: containerd.services.content.v1.WriteContentResponse.action:type_name -> containerd.services.content.v1.WriteAction + 21, // 15: containerd.services.content.v1.WriteContentResponse.started_at:type_name -> google.protobuf.Timestamp + 21, // 16: containerd.services.content.v1.WriteContentResponse.updated_at:type_name -> google.protobuf.Timestamp + 2, // 17: containerd.services.content.v1.Content.Info:input_type -> containerd.services.content.v1.InfoRequest + 4, // 18: containerd.services.content.v1.Content.Update:input_type -> containerd.services.content.v1.UpdateRequest + 6, // 19: containerd.services.content.v1.Content.List:input_type -> containerd.services.content.v1.ListContentRequest + 8, // 20: containerd.services.content.v1.Content.Delete:input_type -> containerd.services.content.v1.DeleteContentRequest + 9, // 21: containerd.services.content.v1.Content.Read:input_type -> containerd.services.content.v1.ReadContentRequest + 12, // 22: containerd.services.content.v1.Content.Status:input_type -> 
containerd.services.content.v1.StatusRequest + 14, // 23: containerd.services.content.v1.Content.ListStatuses:input_type -> containerd.services.content.v1.ListStatusesRequest + 16, // 24: containerd.services.content.v1.Content.Write:input_type -> containerd.services.content.v1.WriteContentRequest + 18, // 25: containerd.services.content.v1.Content.Abort:input_type -> containerd.services.content.v1.AbortRequest + 3, // 26: containerd.services.content.v1.Content.Info:output_type -> containerd.services.content.v1.InfoResponse + 5, // 27: containerd.services.content.v1.Content.Update:output_type -> containerd.services.content.v1.UpdateResponse + 7, // 28: containerd.services.content.v1.Content.List:output_type -> containerd.services.content.v1.ListContentResponse + 23, // 29: containerd.services.content.v1.Content.Delete:output_type -> google.protobuf.Empty + 10, // 30: containerd.services.content.v1.Content.Read:output_type -> containerd.services.content.v1.ReadContentResponse + 13, // 31: containerd.services.content.v1.Content.Status:output_type -> containerd.services.content.v1.StatusResponse + 15, // 32: containerd.services.content.v1.Content.ListStatuses:output_type -> containerd.services.content.v1.ListStatusesResponse + 17, // 33: containerd.services.content.v1.Content.Write:output_type -> containerd.services.content.v1.WriteContentResponse + 23, // 34: containerd.services.content.v1.Content.Abort:output_type -> google.protobuf.Empty + 26, // [26:35] is the sub-list for method output_type + 17, // [17:26] is the sub-list for method input_type + 17, // [17:17] is the sub-list for extension type_name + 17, // [17:17] is the sub-list for extension extendee + 0, // [0:17] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_services_content_v1_content_proto_init() } +func file_github_com_containerd_containerd_api_services_content_v1_content_proto_init() { + if File_github_com_containerd_containerd_api_services_content_v1_content_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Info); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InfoRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InfoResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListContentRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListContentResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteContentRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadContentRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReadContentResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Status); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListStatusesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListStatusesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WriteContentRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WriteContentResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AbortRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDesc, + NumEnums: 1, + NumMessages: 20, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_github_com_containerd_containerd_api_services_content_v1_content_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_services_content_v1_content_proto_depIdxs, + EnumInfos: file_github_com_containerd_containerd_api_services_content_v1_content_proto_enumTypes, + MessageInfos: file_github_com_containerd_containerd_api_services_content_v1_content_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_services_content_v1_content_proto = out.File + file_github_com_containerd_containerd_api_services_content_v1_content_proto_rawDesc = nil + file_github_com_containerd_containerd_api_services_content_v1_content_proto_goTypes = nil + file_github_com_containerd_containerd_api_services_content_v1_content_proto_depIdxs = nil } - -func (m *ListContentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Info) > 0 { - for iNdEx := len(m.Info) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Info[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContent(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *DeleteContentRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteContentRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteContentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Digest) > 0 { - i -= len(m.Digest) - copy(dAtA[i:], m.Digest) - i = encodeVarintContent(dAtA, i, uint64(len(m.Digest))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ReadContentRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReadContentRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReadContentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if 
m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Size_ != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Size_)) - i-- - dAtA[i] = 0x18 - } - if m.Offset != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Offset)) - i-- - dAtA[i] = 0x10 - } - if len(m.Digest) > 0 { - i -= len(m.Digest) - copy(dAtA[i:], m.Digest) - i = encodeVarintContent(dAtA, i, uint64(len(m.Digest))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ReadContentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReadContentResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReadContentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintContent(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x12 - } - if m.Offset != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Offset)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Status) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Status) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Status) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Expected) > 0 { - i -= len(m.Expected) - copy(dAtA[i:], m.Expected) - i = encodeVarintContent(dAtA, i, uint64(len(m.Expected))) - i-- - dAtA[i] = 0x32 - } - if m.Total != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Total)) - i-- - dAtA[i] = 0x28 - } - if m.Offset != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Offset)) - i-- - dAtA[i] = 0x20 - } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintContent(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0x1a - } - n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt):]) - if err7 != nil { - return 0, err7 - } - i -= n7 - i = encodeVarintContent(dAtA, i, uint64(n7)) - i-- - dAtA[i] = 0x12 - n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt):]) - if err8 != nil { - return 0, err8 - } - i -= n8 - i = encodeVarintContent(dAtA, i, uint64(n8)) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *StatusRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - 
copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintContent(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *StatusResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Status != nil { - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContent(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ListStatusesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListStatusesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListStatusesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Filters) > 0 { - for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Filters[iNdEx]) - copy(dAtA[i:], m.Filters[iNdEx]) - i = encodeVarintContent(dAtA, i, uint64(len(m.Filters[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ListStatusesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListStatusesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListStatusesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Statuses) > 0 { - for iNdEx := len(m.Statuses) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Statuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContent(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *WriteContentRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WriteContentRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WriteContentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - 
copy(dAtA[i:], v) - i = encodeVarintContent(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintContent(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintContent(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x3a - } - } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintContent(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x32 - } - if m.Offset != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Offset)) - i-- - dAtA[i] = 0x28 - } - if len(m.Expected) > 0 { - i -= len(m.Expected) - copy(dAtA[i:], m.Expected) - i = encodeVarintContent(dAtA, i, uint64(len(m.Expected))) - i-- - dAtA[i] = 0x22 - } - if m.Total != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Total)) - i-- - dAtA[i] = 0x18 - } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintContent(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0x12 - } - if m.Action != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Action)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *WriteContentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WriteContentResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WriteContentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Digest) > 0 { - i -= len(m.Digest) - copy(dAtA[i:], m.Digest) - i = encodeVarintContent(dAtA, i, uint64(len(m.Digest))) - i-- - dAtA[i] = 0x32 - } - if m.Total != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Total)) - i-- - dAtA[i] = 0x28 - } - if m.Offset != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Offset)) - i-- - dAtA[i] = 0x20 - } - n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt):]) - if err10 != nil { - return 0, err10 - } - i -= n10 - i = encodeVarintContent(dAtA, i, uint64(n10)) - i-- - dAtA[i] = 0x1a - n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt):]) - if err11 != nil { - return 0, err11 - } - i -= n11 - i = encodeVarintContent(dAtA, i, uint64(n11)) - i-- - dAtA[i] = 0x12 - if m.Action != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Action)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *AbortRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AbortRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AbortRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintContent(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintContent(dAtA []byte, offset int, v uint64) int { - offset -= 
sovContent(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Info) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Digest) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.Size_ != 0 { - n += 1 + sovContent(uint64(m.Size_)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt) - n += 1 + l + sovContent(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt) - n += 1 + l + sovContent(uint64(l)) - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovContent(uint64(len(k))) + 1 + len(v) + sovContent(uint64(len(v))) - n += mapEntrySize + 1 + sovContent(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *InfoRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Digest) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *InfoResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Info.Size() - n += 1 + l + sovContent(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UpdateRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Info.Size() - n += 1 + l + sovContent(uint64(l)) - if m.UpdateMask != nil { - l = m.UpdateMask.Size() - n += 1 + l + sovContent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UpdateResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Info.Size() - n += 1 + l + sovContent(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListContentRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Filters) > 0 { - for _, s := range m.Filters { - l = len(s) - n += 1 + l + sovContent(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListContentResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Info) > 0 { - for _, e := range m.Info { - l = e.Size() - n += 1 + l + sovContent(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DeleteContentRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Digest) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ReadContentRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Digest) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.Offset != 0 { - n += 1 + sovContent(uint64(m.Offset)) - } - if m.Size_ != 0 { - n += 1 + sovContent(uint64(m.Size_)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ReadContentResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Offset != 0 { - n += 1 + sovContent(uint64(m.Offset)) - } - l = len(m.Data) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Status) Size() (n int) { - if m == nil { - 
return 0 - } - var l int - _ = l - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt) - n += 1 + l + sovContent(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt) - n += 1 + l + sovContent(uint64(l)) - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.Offset != 0 { - n += 1 + sovContent(uint64(m.Offset)) - } - if m.Total != 0 { - n += 1 + sovContent(uint64(m.Total)) - } - l = len(m.Expected) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StatusRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StatusResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Status != nil { - l = m.Status.Size() - n += 1 + l + sovContent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListStatusesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Filters) > 0 { - for _, s := range m.Filters { - l = len(s) - n += 1 + l + sovContent(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListStatusesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Statuses) > 0 { - for _, e := range m.Statuses { - l = e.Size() - n += 1 + l + sovContent(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *WriteContentRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Action != 0 { - n += 1 + sovContent(uint64(m.Action)) - } - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.Total != 0 { - n += 1 + sovContent(uint64(m.Total)) - } - l = len(m.Expected) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.Offset != 0 { - n += 1 + sovContent(uint64(m.Offset)) - } - l = len(m.Data) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovContent(uint64(len(k))) + 1 + len(v) + sovContent(uint64(len(v))) - n += mapEntrySize + 1 + sovContent(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *WriteContentResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Action != 0 { - n += 1 + sovContent(uint64(m.Action)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt) - n += 1 + l + sovContent(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt) - n += 1 + l + sovContent(uint64(l)) - if m.Offset != 0 { - n += 1 + sovContent(uint64(m.Offset)) - } - if m.Total != 0 { - n += 1 + sovContent(uint64(m.Total)) - } - l = len(m.Digest) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AbortRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovContent(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozContent(x uint64) (n 
int) { - return sovContent(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Info) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&Info{`, - `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, - `Size_:` + fmt.Sprintf("%v", this.Size_) + `,`, - `CreatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CreatedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `UpdatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UpdatedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *InfoRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&InfoRequest{`, - `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *InfoResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&InfoResponse{`, - `Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UpdateRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UpdateRequest{`, - `Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`, - `UpdateMask:` + strings.Replace(fmt.Sprintf("%v", this.UpdateMask), "FieldMask", "types.FieldMask", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UpdateResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UpdateResponse{`, - `Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListContentRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ListContentRequest{`, - `Filters:` + fmt.Sprintf("%v", this.Filters) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListContentResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForInfo := "[]Info{" - for _, f := range this.Info { - repeatedStringForInfo += strings.Replace(strings.Replace(f.String(), "Info", "Info", 1), `&`, ``, 1) + "," - } - repeatedStringForInfo += "}" - s := strings.Join([]string{`&ListContentResponse{`, - `Info:` + repeatedStringForInfo + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *DeleteContentRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeleteContentRequest{`, - `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + 
`,`, - `}`, - }, "") - return s -} -func (this *ReadContentRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReadContentRequest{`, - `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, - `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`, - `Size_:` + fmt.Sprintf("%v", this.Size_) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ReadContentResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReadContentResponse{`, - `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`, - `Data:` + fmt.Sprintf("%v", this.Data) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *Status) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Status{`, - `StartedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `UpdatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UpdatedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, - `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`, - `Total:` + fmt.Sprintf("%v", this.Total) + `,`, - `Expected:` + fmt.Sprintf("%v", this.Expected) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *StatusRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatusRequest{`, - `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *StatusResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatusResponse{`, - `Status:` + strings.Replace(this.Status.String(), "Status", "Status", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListStatusesRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ListStatusesRequest{`, - `Filters:` + fmt.Sprintf("%v", this.Filters) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListStatusesResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForStatuses := "[]Status{" - for _, f := range this.Statuses { - repeatedStringForStatuses += strings.Replace(strings.Replace(f.String(), "Status", "Status", 1), `&`, ``, 1) + "," - } - repeatedStringForStatuses += "}" - s := strings.Join([]string{`&ListStatusesResponse{`, - `Statuses:` + repeatedStringForStatuses + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *WriteContentRequest) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&WriteContentRequest{`, - `Action:` + fmt.Sprintf("%v", this.Action) + `,`, - `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, - `Total:` + 
fmt.Sprintf("%v", this.Total) + `,`, - `Expected:` + fmt.Sprintf("%v", this.Expected) + `,`, - `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`, - `Data:` + fmt.Sprintf("%v", this.Data) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *WriteContentResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WriteContentResponse{`, - `Action:` + fmt.Sprintf("%v", this.Action) + `,`, - `StartedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `UpdatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UpdatedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`, - `Total:` + fmt.Sprintf("%v", this.Total) + `,`, - `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *AbortRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AbortRequest{`, - `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringContent(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Info) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Info: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Info: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) - } - m.Size_ = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Size_ |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - 
b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthContent - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthContent - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthContent - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthContent - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = 
postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InfoRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: InfoRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InfoResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: InfoResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UpdateRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UpdateRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdateMask", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 
0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.UpdateMask == nil { - m.UpdateMask = &types.FieldMask{} - } - if err := m.UpdateMask.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UpdateResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UpdateResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListContentRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListContentRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListContentRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListContentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListContentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListContentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Info = append(m.Info, Info{}) - if err := m.Info[len(m.Info)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteContentRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteContentRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteContentRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReadContentRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReadContentRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReadContentRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) - } - m.Offset = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Offset |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) - } - m.Size_ = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Size_ |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReadContentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReadContentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReadContentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) - } - m.Offset = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Offset |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Status) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Status: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.StartedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ref = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) - } - m.Offset = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Offset |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) - } - m.Total = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Total |= 
int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Expected", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Expected = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatusRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ref = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatusResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Status == nil { - m.Status = &Status{} - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListStatusesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListStatusesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListStatusesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListStatusesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListStatusesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListStatusesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Statuses", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Statuses = append(m.Statuses, Status{}) - if err := m.Statuses[len(m.Statuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WriteContentRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WriteContentRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WriteContentRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) - } - m.Action = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Action |= WriteAction(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ref = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) - } - m.Total = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Total |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Expected", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Expected = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) - } - m.Offset = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Offset |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthContent - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthContent - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthContent - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthContent - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WriteContentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WriteContentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WriteContentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) - } - m.Action = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Action |= WriteAction(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.StartedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) - } - m.Offset = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Offset |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) - } - m.Total = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Total |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AbortRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AbortRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AbortRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ref = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipContent(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowContent - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowContent - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowContent - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthContent - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupContent - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthContent - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthContent = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowContent = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupContent = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto b/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto index b33ea5b2e8c4f..8aea0636b84f1 100644 --- a/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto +++ b/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto @@ -18,7 +18,6 @@ syntax = "proto3"; package containerd.services.content.v1; -import weak "gogoproto/gogo.proto"; import "google/protobuf/field_mask.proto"; import "google/protobuf/timestamp.proto"; import "google/protobuf/empty.proto"; @@ -92,16 +91,16 @@ service Content { message Info { // Digest is the hash identity of the blob. - string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + string digest = 1; // Size is the total number of bytes in the blob. int64 size = 2; // CreatedAt provides the time at which the blob was committed. - google.protobuf.Timestamp created_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp created_at = 3; // UpdatedAt provides the time the info was last updated. - google.protobuf.Timestamp updated_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp updated_at = 4; // Labels are arbitrary data on snapshots. // @@ -110,15 +109,15 @@ message Info { } message InfoRequest { - string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + string digest = 1; } message InfoResponse { - Info info = 1 [(gogoproto.nullable) = false]; + Info info = 1; } message UpdateRequest { - Info info = 1 [(gogoproto.nullable) = false]; + Info info = 1; // UpdateMask specifies which fields to perform the update on. 
If empty, // the operation applies to all fields. @@ -130,7 +129,7 @@ message UpdateRequest { } message UpdateResponse { - Info info = 1 [(gogoproto.nullable) = false]; + Info info = 1; } message ListContentRequest { @@ -141,26 +140,26 @@ message ListContentRequest { // filters. Expanded, containers that match the following will be // returned: // - // filters[0] or filters[1] or ... or filters[n-1] or filters[n] + // filters[0] or filters[1] or ... or filters[n-1] or filters[n] // // If filters is zero-length or nil, all items will be returned. repeated string filters = 1; } message ListContentResponse { - repeated Info info = 1 [(gogoproto.nullable) = false]; + repeated Info info = 1; } message DeleteContentRequest { // Digest specifies which content to delete. - string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + string digest = 1; } // ReadContentRequest defines the fields that make up a request to read a portion of // data from a stored object. message ReadContentRequest { // Digest is the hash identity to read. - string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + string digest = 1; // Offset specifies the number of bytes from the start at which to begin // the read. If zero or less, the read will be from the start. This uses @@ -179,12 +178,12 @@ message ReadContentResponse { } message Status { - google.protobuf.Timestamp started_at = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; - google.protobuf.Timestamp updated_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp started_at = 1; + google.protobuf.Timestamp updated_at = 2; string ref = 3; int64 offset = 4; int64 total = 5; - string expected = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + string expected = 6; } @@ -201,17 +200,14 @@ message ListStatusesRequest { } message ListStatusesResponse { - repeated Status statuses = 1 [(gogoproto.nullable) = false]; + repeated Status statuses = 1; } // WriteAction defines the behavior of a WriteRequest. enum WriteAction { - option (gogoproto.goproto_enum_prefix) = false; - option (gogoproto.enum_customname) = "WriteAction"; - // WriteActionStat instructs the writer to return the current status while // holding the lock on the write. - STAT = 0 [(gogoproto.enumvalue_customname) = "WriteActionStat"]; + STAT = 0; // WriteActionWrite sets the action for the write request to write data. // @@ -219,7 +215,7 @@ enum WriteAction { // transaction will be left open for further writes. // // This is the default. - WRITE = 1 [(gogoproto.enumvalue_customname) = "WriteActionWrite"]; + WRITE = 1; // WriteActionCommit will write any outstanding data in the message and // commit the write, storing it under the digest. @@ -228,7 +224,7 @@ enum WriteAction { // commit it. // // This action will always terminate the write. - COMMIT = 2 [(gogoproto.enumvalue_customname) = "WriteActionCommit"]; + COMMIT = 2; } // WriteContentRequest writes data to the request ref at offset. @@ -269,7 +265,7 @@ message WriteContentRequest { // Only the latest version will be used to check the content against the // digest. It is only required to include it on a single message, before or // with the commit action message. 
- string expected = 4 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + string expected = 4; // Offset specifies the number of bytes from the start at which to begin // the write. For most implementations, this means from the start of the @@ -304,13 +300,13 @@ message WriteContentResponse { // // This must be set for stat and commit write actions. All other write // actions may omit this. - google.protobuf.Timestamp started_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp started_at = 2; // UpdatedAt provides the last time of a successful write. // // This must be set for stat and commit write actions. All other write // actions may omit this. - google.protobuf.Timestamp updated_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp updated_at = 3; // Offset is the current committed size for the write. int64 offset = 4; @@ -326,7 +322,7 @@ message WriteContentResponse { // Digest, if present, includes the digest up to the currently committed // bytes. If action is commit, this field will be set. It is implementation // defined if this is set for other actions. - string digest = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + string digest = 6; } message AbortRequest { diff --git a/vendor/github.com/containerd/containerd/api/services/content/v1/content_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/content/v1/content_grpc.pb.go new file mode 100644 index 0000000000000..e230c45a67050 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/content/v1/content_grpc.pb.go @@ -0,0 +1,569 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/content/v1/content.proto + +package content + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// ContentClient is the client API for Content service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ContentClient interface { + // Info returns information about a committed object. + // + // This call can be used for getting the size of content and checking for + // existence. + Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) + // Update updates content metadata. + // + // This call can be used to manage the mutable content labels. The + // immutable metadata such as digest, size, and committed at cannot + // be updated. + Update(ctx context.Context, in *UpdateRequest, opts ...grpc.CallOption) (*UpdateResponse, error) + // List streams the entire set of content as Info objects and closes the + // stream. + // + // Typically, this will yield a large response, chunked into messages. + // Clients should make provisions to ensure they can handle the entire data + // set. 
+ List(ctx context.Context, in *ListContentRequest, opts ...grpc.CallOption) (Content_ListClient, error) + // Delete will delete the referenced object. + Delete(ctx context.Context, in *DeleteContentRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Read allows one to read an object based on the offset into the content. + // + // The requested data may be returned in one or more messages. + Read(ctx context.Context, in *ReadContentRequest, opts ...grpc.CallOption) (Content_ReadClient, error) + // Status returns the status for a single reference. + Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) + // ListStatuses returns the status of ongoing object ingestions, started via + // Write. + // + // Only those matching the regular expression will be provided in the + // response. If the provided regular expression is empty, all ingestions + // will be provided. + ListStatuses(ctx context.Context, in *ListStatusesRequest, opts ...grpc.CallOption) (*ListStatusesResponse, error) + // Write begins or resumes writes to a resource identified by a unique ref. + // Only one active stream may exist at a time for each ref. + // + // Once a write stream has started, it may only write to a single ref, thus + // once a stream is started, the ref may be omitted on subsequent writes. + // + // For any write transaction represented by a ref, only a single write may + // be made to a given offset. If overlapping writes occur, it is an error. + // Writes should be sequential and implementations may throw an error if + // this is required. + // + // If expected_digest is set and already part of the content store, the + // write will fail. + // + // When completed, the commit flag should be set to true. If expected size + // or digest is set, the content will be validated against those values. + Write(ctx context.Context, opts ...grpc.CallOption) (Content_WriteClient, error) + // Abort cancels the ongoing write named in the request. Any resources + // associated with the write will be collected. + Abort(ctx context.Context, in *AbortRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) +} + +type contentClient struct { + cc grpc.ClientConnInterface +} + +func NewContentClient(cc grpc.ClientConnInterface) ContentClient { + return &contentClient{cc} +} + +func (c *contentClient) Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) { + out := new(InfoResponse) + err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Info", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *contentClient) Update(ctx context.Context, in *UpdateRequest, opts ...grpc.CallOption) (*UpdateResponse, error) { + out := new(UpdateResponse) + err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Update", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *contentClient) List(ctx context.Context, in *ListContentRequest, opts ...grpc.CallOption) (Content_ListClient, error) { + stream, err := c.cc.NewStream(ctx, &Content_ServiceDesc.Streams[0], "/containerd.services.content.v1.Content/List", opts...) 
+ if err != nil { + return nil, err + } + x := &contentListClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Content_ListClient interface { + Recv() (*ListContentResponse, error) + grpc.ClientStream +} + +type contentListClient struct { + grpc.ClientStream +} + +func (x *contentListClient) Recv() (*ListContentResponse, error) { + m := new(ListContentResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *contentClient) Delete(ctx context.Context, in *DeleteContentRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *contentClient) Read(ctx context.Context, in *ReadContentRequest, opts ...grpc.CallOption) (Content_ReadClient, error) { + stream, err := c.cc.NewStream(ctx, &Content_ServiceDesc.Streams[1], "/containerd.services.content.v1.Content/Read", opts...) + if err != nil { + return nil, err + } + x := &contentReadClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Content_ReadClient interface { + Recv() (*ReadContentResponse, error) + grpc.ClientStream +} + +type contentReadClient struct { + grpc.ClientStream +} + +func (x *contentReadClient) Recv() (*ReadContentResponse, error) { + m := new(ReadContentResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *contentClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { + out := new(StatusResponse) + err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Status", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *contentClient) ListStatuses(ctx context.Context, in *ListStatusesRequest, opts ...grpc.CallOption) (*ListStatusesResponse, error) { + out := new(ListStatusesResponse) + err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/ListStatuses", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *contentClient) Write(ctx context.Context, opts ...grpc.CallOption) (Content_WriteClient, error) { + stream, err := c.cc.NewStream(ctx, &Content_ServiceDesc.Streams[2], "/containerd.services.content.v1.Content/Write", opts...) + if err != nil { + return nil, err + } + x := &contentWriteClient{stream} + return x, nil +} + +type Content_WriteClient interface { + Send(*WriteContentRequest) error + Recv() (*WriteContentResponse, error) + grpc.ClientStream +} + +type contentWriteClient struct { + grpc.ClientStream +} + +func (x *contentWriteClient) Send(m *WriteContentRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *contentWriteClient) Recv() (*WriteContentResponse, error) { + m := new(WriteContentResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *contentClient) Abort(ctx context.Context, in *AbortRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Abort", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// ContentServer is the server API for Content service. +// All implementations must embed UnimplementedContentServer +// for forward compatibility +type ContentServer interface { + // Info returns information about a committed object. + // + // This call can be used for getting the size of content and checking for + // existence. + Info(context.Context, *InfoRequest) (*InfoResponse, error) + // Update updates content metadata. + // + // This call can be used to manage the mutable content labels. The + // immutable metadata such as digest, size, and committed at cannot + // be updated. + Update(context.Context, *UpdateRequest) (*UpdateResponse, error) + // List streams the entire set of content as Info objects and closes the + // stream. + // + // Typically, this will yield a large response, chunked into messages. + // Clients should make provisions to ensure they can handle the entire data + // set. + List(*ListContentRequest, Content_ListServer) error + // Delete will delete the referenced object. + Delete(context.Context, *DeleteContentRequest) (*emptypb.Empty, error) + // Read allows one to read an object based on the offset into the content. + // + // The requested data may be returned in one or more messages. + Read(*ReadContentRequest, Content_ReadServer) error + // Status returns the status for a single reference. + Status(context.Context, *StatusRequest) (*StatusResponse, error) + // ListStatuses returns the status of ongoing object ingestions, started via + // Write. + // + // Only those matching the regular expression will be provided in the + // response. If the provided regular expression is empty, all ingestions + // will be provided. + ListStatuses(context.Context, *ListStatusesRequest) (*ListStatusesResponse, error) + // Write begins or resumes writes to a resource identified by a unique ref. + // Only one active stream may exist at a time for each ref. + // + // Once a write stream has started, it may only write to a single ref, thus + // once a stream is started, the ref may be omitted on subsequent writes. + // + // For any write transaction represented by a ref, only a single write may + // be made to a given offset. If overlapping writes occur, it is an error. + // Writes should be sequential and implementations may throw an error if + // this is required. + // + // If expected_digest is set and already part of the content store, the + // write will fail. + // + // When completed, the commit flag should be set to true. If expected size + // or digest is set, the content will be validated against those values. + Write(Content_WriteServer) error + // Abort cancels the ongoing write named in the request. Any resources + // associated with the write will be collected. + Abort(context.Context, *AbortRequest) (*emptypb.Empty, error) + mustEmbedUnimplementedContentServer() +} + +// UnimplementedContentServer must be embedded to have forward compatible implementations. 
+type UnimplementedContentServer struct { +} + +func (UnimplementedContentServer) Info(context.Context, *InfoRequest) (*InfoResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Info not implemented") +} +func (UnimplementedContentServer) Update(context.Context, *UpdateRequest) (*UpdateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") +} +func (UnimplementedContentServer) List(*ListContentRequest, Content_ListServer) error { + return status.Errorf(codes.Unimplemented, "method List not implemented") +} +func (UnimplementedContentServer) Delete(context.Context, *DeleteContentRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (UnimplementedContentServer) Read(*ReadContentRequest, Content_ReadServer) error { + return status.Errorf(codes.Unimplemented, "method Read not implemented") +} +func (UnimplementedContentServer) Status(context.Context, *StatusRequest) (*StatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") +} +func (UnimplementedContentServer) ListStatuses(context.Context, *ListStatusesRequest) (*ListStatusesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListStatuses not implemented") +} +func (UnimplementedContentServer) Write(Content_WriteServer) error { + return status.Errorf(codes.Unimplemented, "method Write not implemented") +} +func (UnimplementedContentServer) Abort(context.Context, *AbortRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Abort not implemented") +} +func (UnimplementedContentServer) mustEmbedUnimplementedContentServer() {} + +// UnsafeContentServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ContentServer will +// result in compilation errors. 
+type UnsafeContentServer interface { + mustEmbedUnimplementedContentServer() +} + +func RegisterContentServer(s grpc.ServiceRegistrar, srv ContentServer) { + s.RegisterService(&Content_ServiceDesc, srv) +} + +func _Content_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InfoRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContentServer).Info(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.content.v1.Content/Info", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContentServer).Info(ctx, req.(*InfoRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Content_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContentServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.content.v1.Content/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContentServer).Update(ctx, req.(*UpdateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Content_List_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ListContentRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ContentServer).List(m, &contentListServer{stream}) +} + +type Content_ListServer interface { + Send(*ListContentResponse) error + grpc.ServerStream +} + +type contentListServer struct { + grpc.ServerStream +} + +func (x *contentListServer) Send(m *ListContentResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Content_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteContentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContentServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.content.v1.Content/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContentServer).Delete(ctx, req.(*DeleteContentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Content_Read_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ReadContentRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ContentServer).Read(m, &contentReadServer{stream}) +} + +type Content_ReadServer interface { + Send(*ReadContentResponse) error + grpc.ServerStream +} + +type contentReadServer struct { + grpc.ServerStream +} + +func (x *contentReadServer) Send(m *ReadContentResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Content_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContentServer).Status(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.content.v1.Content/Status", + } + handler 
:= func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContentServer).Status(ctx, req.(*StatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Content_ListStatuses_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListStatusesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContentServer).ListStatuses(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.content.v1.Content/ListStatuses", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContentServer).ListStatuses(ctx, req.(*ListStatusesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Content_Write_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(ContentServer).Write(&contentWriteServer{stream}) +} + +type Content_WriteServer interface { + Send(*WriteContentResponse) error + Recv() (*WriteContentRequest, error) + grpc.ServerStream +} + +type contentWriteServer struct { + grpc.ServerStream +} + +func (x *contentWriteServer) Send(m *WriteContentResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *contentWriteServer) Recv() (*WriteContentRequest, error) { + m := new(WriteContentRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Content_Abort_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AbortRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ContentServer).Abort(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.content.v1.Content/Abort", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ContentServer).Abort(ctx, req.(*AbortRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Content_ServiceDesc is the grpc.ServiceDesc for Content service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Content_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.content.v1.Content", + HandlerType: (*ContentServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Info", + Handler: _Content_Info_Handler, + }, + { + MethodName: "Update", + Handler: _Content_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _Content_Delete_Handler, + }, + { + MethodName: "Status", + Handler: _Content_Status_Handler, + }, + { + MethodName: "ListStatuses", + Handler: _Content_ListStatuses_Handler, + }, + { + MethodName: "Abort", + Handler: _Content_Abort_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "List", + Handler: _Content_List_Handler, + ServerStreams: true, + }, + { + StreamName: "Read", + Handler: _Content_Read_Handler, + ServerStreams: true, + }, + { + StreamName: "Write", + Handler: _Content_Write_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "github.com/containerd/containerd/api/services/content/v1/content.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go index b1450ceb82f64..54df8b56d9cb5 100644 --- a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go @@ -1,121 +1,162 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/services/diff/v1/diff.proto package diff import ( - context "context" - fmt "fmt" types "github.com/containerd/containerd/api/types" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - types1 "github.com/gogo/protobuf/types" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - strings "strings" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type ApplyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Diff is the descriptor of the diff to be extracted - Diff *types.Descriptor `protobuf:"bytes,1,opt,name=diff,proto3" json:"diff,omitempty"` - Mounts []*types.Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"` - Payloads map[string]*types1.Any `protobuf:"bytes,3,rep,name=payloads,proto3" json:"payloads,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Diff *types.Descriptor `protobuf:"bytes,1,opt,name=diff,proto3" json:"diff,omitempty"` + Mounts []*types.Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"` + Payloads map[string]*anypb.Any `protobuf:"bytes,3,rep,name=payloads,proto3" json:"payloads,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *ApplyRequest) Reset() { *m = ApplyRequest{} } -func (*ApplyRequest) ProtoMessage() {} -func (*ApplyRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_3b36a99e6faaa935, []int{0} +func (x *ApplyRequest) Reset() { + *x = ApplyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ApplyRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + +func (x *ApplyRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ApplyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ApplyRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (*ApplyRequest) ProtoMessage() {} + +func (x *ApplyRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *ApplyRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ApplyRequest.Merge(m, src) + +// Deprecated: Use ApplyRequest.ProtoReflect.Descriptor instead. 
+func (*ApplyRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescGZIP(), []int{0} } -func (m *ApplyRequest) XXX_Size() int { - return m.Size() + +func (x *ApplyRequest) GetDiff() *types.Descriptor { + if x != nil { + return x.Diff + } + return nil } -func (m *ApplyRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ApplyRequest.DiscardUnknown(m) + +func (x *ApplyRequest) GetMounts() []*types.Mount { + if x != nil { + return x.Mounts + } + return nil } -var xxx_messageInfo_ApplyRequest proto.InternalMessageInfo +func (x *ApplyRequest) GetPayloads() map[string]*anypb.Any { + if x != nil { + return x.Payloads + } + return nil +} type ApplyResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Applied is the descriptor for the object which was applied. // If the input was a compressed blob then the result will be // the descriptor for the uncompressed blob. - Applied *types.Descriptor `protobuf:"bytes,1,opt,name=applied,proto3" json:"applied,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Applied *types.Descriptor `protobuf:"bytes,1,opt,name=applied,proto3" json:"applied,omitempty"` } -func (m *ApplyResponse) Reset() { *m = ApplyResponse{} } -func (*ApplyResponse) ProtoMessage() {} -func (*ApplyResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_3b36a99e6faaa935, []int{1} +func (x *ApplyResponse) Reset() { + *x = ApplyResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ApplyResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + +func (x *ApplyResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ApplyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ApplyResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (*ApplyResponse) ProtoMessage() {} + +func (x *ApplyResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *ApplyResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ApplyResponse.Merge(m, src) -} -func (m *ApplyResponse) XXX_Size() int { - return m.Size() -} -func (m *ApplyResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ApplyResponse.DiscardUnknown(m) + +// Deprecated: Use ApplyResponse.ProtoReflect.Descriptor instead. 
+func (*ApplyResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescGZIP(), []int{1} } -var xxx_messageInfo_ApplyResponse proto.InternalMessageInfo +func (x *ApplyResponse) GetApplied() *types.Descriptor { + if x != nil { + return x.Applied + } + return nil +} type DiffRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Left are the mounts which represent the older copy // in which is the base of the computed changes. Left []*types.Mount `protobuf:"bytes,1,rep,name=left,proto3" json:"left,omitempty"` @@ -130,1537 +171,341 @@ type DiffRequest struct { Ref string `protobuf:"bytes,4,opt,name=ref,proto3" json:"ref,omitempty"` // Labels are the labels to apply to the generated content // on content store commit. - Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // SourceDateEpoch specifies the timestamp used for whiteouts to provide control for reproducibility. + // See also https://reproducible-builds.org/docs/source-date-epoch/ . + SourceDateEpoch *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=source_date_epoch,json=sourceDateEpoch,proto3" json:"source_date_epoch,omitempty"` } -func (m *DiffRequest) Reset() { *m = DiffRequest{} } -func (*DiffRequest) ProtoMessage() {} -func (*DiffRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_3b36a99e6faaa935, []int{2} -} -func (m *DiffRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DiffRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DiffRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *DiffRequest) Reset() { + *x = DiffRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *DiffRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DiffRequest.Merge(m, src) -} -func (m *DiffRequest) XXX_Size() int { - return m.Size() -} -func (m *DiffRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DiffRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DiffRequest proto.InternalMessageInfo -type DiffResponse struct { - // Diff is the descriptor of the diff which can be applied - Diff *types.Descriptor `protobuf:"bytes,3,opt,name=diff,proto3" json:"diff,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DiffResponse) Reset() { *m = DiffResponse{} } -func (*DiffResponse) ProtoMessage() {} -func (*DiffResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_3b36a99e6faaa935, []int{3} -} -func (m *DiffResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DiffResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return 
xxx_messageInfo_DiffResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DiffResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DiffResponse.Merge(m, src) -} -func (m *DiffResponse) XXX_Size() int { - return m.Size() -} -func (m *DiffResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DiffResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_DiffResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*ApplyRequest)(nil), "containerd.services.diff.v1.ApplyRequest") - proto.RegisterMapType((map[string]*types1.Any)(nil), "containerd.services.diff.v1.ApplyRequest.PayloadsEntry") - proto.RegisterType((*ApplyResponse)(nil), "containerd.services.diff.v1.ApplyResponse") - proto.RegisterType((*DiffRequest)(nil), "containerd.services.diff.v1.DiffRequest") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.diff.v1.DiffRequest.LabelsEntry") - proto.RegisterType((*DiffResponse)(nil), "containerd.services.diff.v1.DiffResponse") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/diff/v1/diff.proto", fileDescriptor_3b36a99e6faaa935) -} - -var fileDescriptor_3b36a99e6faaa935 = []byte{ - // 526 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x41, 0x6f, 0xd3, 0x4c, - 0x10, 0x8d, 0xed, 0x24, 0xdf, 0x97, 0x49, 0x2b, 0xa1, 0x55, 0x24, 0x8c, 0x01, 0xab, 0xca, 0x29, - 0x2d, 0x62, 0x4d, 0x03, 0x2a, 0xd0, 0x5e, 0x5a, 0x54, 0xc4, 0xa5, 0x48, 0x60, 0x7a, 0x40, 0x20, - 0x81, 0x9c, 0x78, 0xed, 0xae, 0x70, 0xbc, 0x8b, 0x77, 0x1d, 0xc9, 0x37, 0xfe, 0x06, 0x67, 0x7e, - 0x0a, 0x97, 0x1e, 0x39, 0x72, 0xa4, 0xf9, 0x25, 0xc8, 0xeb, 0x75, 0x31, 0x02, 0x05, 0xc3, 0xc9, - 0x9b, 0x9d, 0xf7, 0xde, 0xce, 0xbc, 0x37, 0x0a, 0x1c, 0xc6, 0x54, 0x9e, 0xe5, 0x33, 0x3c, 0x67, - 0x0b, 0x6f, 0xce, 0x52, 0x19, 0xd0, 0x94, 0x64, 0x61, 0xf3, 0x18, 0x70, 0xea, 0x09, 0x92, 0x2d, - 0xe9, 0x9c, 0x08, 0x2f, 0xa4, 0x51, 0xe4, 0x2d, 0x77, 0xd5, 0x17, 0xf3, 0x8c, 0x49, 0x86, 0xae, - 0xff, 0xc0, 0xe2, 0x1a, 0x87, 0x55, 0x7d, 0xb9, 0xeb, 0x8c, 0x62, 0x16, 0x33, 0x85, 0xf3, 0xca, - 0x53, 0x45, 0x71, 0xae, 0xc5, 0x8c, 0xc5, 0x09, 0xf1, 0xd4, 0xaf, 0x59, 0x1e, 0x79, 0x41, 0x5a, - 0xe8, 0xd2, 0x5e, 0xab, 0x7e, 0x64, 0xc1, 0x89, 0xf0, 0x16, 0x2c, 0x4f, 0xa5, 0xe6, 0x1d, 0xfc, - 0x05, 0x2f, 0x24, 0x62, 0x9e, 0x51, 0x2e, 0x59, 0x56, 0x91, 0xc7, 0x1f, 0x4d, 0xd8, 0x38, 0xe2, - 0x3c, 0x29, 0x7c, 0xf2, 0x3e, 0x27, 0x42, 0xa2, 0x3b, 0xd0, 0x2d, 0x27, 0xb0, 0x8d, 0x2d, 0x63, - 0x32, 0x9c, 0xde, 0xc0, 0x8d, 0x11, 0x95, 0x04, 0x3e, 0xbe, 0x94, 0xf0, 0x15, 0x12, 0x79, 0xd0, - 0x57, 0xed, 0x08, 0xdb, 0xdc, 0xb2, 0x26, 0xc3, 0xe9, 0xd5, 0x5f, 0x39, 0x4f, 0xcb, 0xba, 0xaf, - 0x61, 0xe8, 0x05, 0xfc, 0xcf, 0x83, 0x22, 0x61, 0x41, 0x28, 0x6c, 0x4b, 0x51, 0xee, 0xe3, 0x35, - 0x4e, 0xe2, 0x66, 0x7f, 0xf8, 0x99, 0x66, 0x3e, 0x4e, 0x65, 0x56, 0xf8, 0x97, 0x42, 0xce, 0x73, - 0xd8, 0xfc, 0xa9, 0x84, 0xae, 0x80, 0xf5, 0x8e, 0x14, 0x6a, 0x8e, 0x81, 0x5f, 0x1e, 0xd1, 0x0e, - 0xf4, 0x96, 0x41, 0x92, 0x13, 0xdb, 0x54, 0xb3, 0x8d, 0x70, 0x95, 0x05, 0xae, 0xb3, 0xc0, 0x47, - 0x69, 0xe1, 0x57, 0x90, 0x7d, 0xf3, 0x81, 0x31, 0x7e, 0x02, 0x9b, 0xfa, 0x69, 0xc1, 0x59, 0x2a, - 0x08, 0xda, 0x83, 0xff, 0x02, 0xce, 0x13, 0x4a, 0xc2, 0x56, 0xf6, 0xd4, 0xe0, 0xf1, 0x27, 0x13, - 0x86, 0xc7, 0x34, 0x8a, 0x6a, 0x8f, 0x6f, 0x41, 0x37, 0x21, 0x91, 0xb4, 0x8d, 0xf5, 0x7e, 0x29, - 0x10, 0xba, 0x0d, 0xbd, 0x8c, 0xc6, 0x67, 0xf2, 0x4f, 
0xee, 0x56, 0x28, 0x74, 0x13, 0x60, 0x41, - 0x42, 0x1a, 0xbc, 0x2d, 0x6b, 0xb6, 0xa5, 0xa6, 0x1f, 0xa8, 0x9b, 0xd3, 0x82, 0x93, 0xd2, 0x95, - 0x8c, 0x44, 0x76, 0xb7, 0x72, 0x25, 0x23, 0x11, 0x3a, 0x81, 0x7e, 0x12, 0xcc, 0x48, 0x22, 0xec, - 0x9e, 0x7a, 0xe0, 0xde, 0xda, 0x2c, 0x1a, 0x63, 0xe0, 0x13, 0x45, 0xab, 0x82, 0xd0, 0x1a, 0xce, - 0x43, 0x18, 0x36, 0xae, 0x7f, 0x13, 0xc2, 0xa8, 0x19, 0xc2, 0xa0, 0x69, 0xf7, 0x21, 0x6c, 0x54, - 0xea, 0xda, 0xed, 0x7a, 0x13, 0xad, 0xb6, 0x9b, 0x38, 0xfd, 0x6c, 0x40, 0xb7, 0x94, 0x40, 0x6f, - 0xa0, 0xa7, 0x92, 0x43, 0xdb, 0xad, 0x17, 0xcb, 0xd9, 0x69, 0x03, 0xd5, 0xad, 0xbd, 0xd6, 0xef, - 0x4c, 0xda, 0x7a, 0xe5, 0x6c, 0xb7, 0x40, 0x56, 0xe2, 0x8f, 0x4e, 0xcf, 0x2f, 0xdc, 0xce, 0xd7, - 0x0b, 0xb7, 0xf3, 0x61, 0xe5, 0x1a, 0xe7, 0x2b, 0xd7, 0xf8, 0xb2, 0x72, 0x8d, 0x6f, 0x2b, 0xd7, - 0x78, 0xb5, 0xff, 0x4f, 0xff, 0x58, 0x07, 0xe5, 0xf7, 0x65, 0x67, 0xd6, 0x57, 0x7b, 0x7e, 0xf7, - 0x7b, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x85, 0x25, 0xb8, 0xf8, 0x04, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// DiffClient is the client API for Diff service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type DiffClient interface { - // Apply applies the content associated with the provided digests onto - // the provided mounts. Archive content will be extracted and - // decompressed if necessary. - Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.CallOption) (*ApplyResponse, error) - // Diff creates a diff between the given mounts and uploads the result - // to the content store. - Diff(ctx context.Context, in *DiffRequest, opts ...grpc.CallOption) (*DiffResponse, error) -} - -type diffClient struct { - cc *grpc.ClientConn -} - -func NewDiffClient(cc *grpc.ClientConn) DiffClient { - return &diffClient{cc} -} - -func (c *diffClient) Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.CallOption) (*ApplyResponse, error) { - out := new(ApplyResponse) - err := c.cc.Invoke(ctx, "/containerd.services.diff.v1.Diff/Apply", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +func (x *DiffRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (c *diffClient) Diff(ctx context.Context, in *DiffRequest, opts ...grpc.CallOption) (*DiffResponse, error) { - out := new(DiffResponse) - err := c.cc.Invoke(ctx, "/containerd.services.diff.v1.Diff/Diff", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// DiffServer is the server API for Diff service. -type DiffServer interface { - // Apply applies the content associated with the provided digests onto - // the provided mounts. Archive content will be extracted and - // decompressed if necessary. - Apply(context.Context, *ApplyRequest) (*ApplyResponse, error) - // Diff creates a diff between the given mounts and uploads the result - // to the content store. - Diff(context.Context, *DiffRequest) (*DiffResponse, error) -} - -// UnimplementedDiffServer can be embedded to have forward compatible implementations. 
-type UnimplementedDiffServer struct { -} - -func (*UnimplementedDiffServer) Apply(ctx context.Context, req *ApplyRequest) (*ApplyResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Apply not implemented") -} -func (*UnimplementedDiffServer) Diff(ctx context.Context, req *DiffRequest) (*DiffResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Diff not implemented") -} - -func RegisterDiffServer(s *grpc.Server, srv DiffServer) { - s.RegisterService(&_Diff_serviceDesc, srv) -} - -func _Diff_Apply_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ApplyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DiffServer).Apply(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.diff.v1.Diff/Apply", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DiffServer).Apply(ctx, req.(*ApplyRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Diff_Diff_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DiffRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(DiffServer).Diff(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.diff.v1.Diff/Diff", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(DiffServer).Diff(ctx, req.(*DiffRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Diff_serviceDesc = grpc.ServiceDesc{ - ServiceName: "containerd.services.diff.v1.Diff", - HandlerType: (*DiffServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Apply", - Handler: _Diff_Apply_Handler, - }, - { - MethodName: "Diff", - Handler: _Diff_Diff_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "github.com/containerd/containerd/api/services/diff/v1/diff.proto", -} +func (*DiffRequest) ProtoMessage() {} -func (m *ApplyRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *DiffRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *ApplyRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use DiffRequest.ProtoReflect.Descriptor instead. 
+func (*DiffRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescGZIP(), []int{2} } -func (m *ApplyRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Payloads) > 0 { - for k := range m.Payloads { - v := m.Payloads[k] - baseI := i - if v != nil { - { - size, err := v.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDiff(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintDiff(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintDiff(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1a - } - } - if len(m.Mounts) > 0 { - for iNdEx := len(m.Mounts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Mounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDiff(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } +func (x *DiffRequest) GetLeft() []*types.Mount { + if x != nil { + return x.Left } - if m.Diff != nil { - { - size, err := m.Diff.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDiff(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return nil } -func (m *ApplyResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *DiffRequest) GetRight() []*types.Mount { + if x != nil { + return x.Right } - return dAtA[:n], nil -} - -func (m *ApplyResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return nil } -func (m *ApplyResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Applied != nil { - { - size, err := m.Applied.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDiff(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa +func (x *DiffRequest) GetMediaType() string { + if x != nil { + return x.MediaType } - return len(dAtA) - i, nil + return "" } -func (m *DiffRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *DiffRequest) GetRef() string { + if x != nil { + return x.Ref } - return dAtA[:n], nil + return "" } -func (m *DiffRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DiffRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintDiff(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintDiff(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintDiff(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x2a - } - } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) 
- i = encodeVarintDiff(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0x22 +func (x *DiffRequest) GetLabels() map[string]string { + if x != nil { + return x.Labels } - if len(m.MediaType) > 0 { - i -= len(m.MediaType) - copy(dAtA[i:], m.MediaType) - i = encodeVarintDiff(dAtA, i, uint64(len(m.MediaType))) - i-- - dAtA[i] = 0x1a - } - if len(m.Right) > 0 { - for iNdEx := len(m.Right) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Right[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDiff(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Left) > 0 { - for iNdEx := len(m.Left) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Left[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDiff(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil + return nil } -func (m *DiffResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *DiffRequest) GetSourceDateEpoch() *timestamppb.Timestamp { + if x != nil { + return x.SourceDateEpoch } - return dAtA[:n], nil -} - -func (m *DiffResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return nil } -func (m *DiffResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Diff != nil { - { - size, err := m.Diff.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDiff(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - return len(dAtA) - i, nil -} +type DiffResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func encodeVarintDiff(dAtA []byte, offset int, v uint64) int { - offset -= sovDiff(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ApplyRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Diff != nil { - l = m.Diff.Size() - n += 1 + l + sovDiff(uint64(l)) - } - if len(m.Mounts) > 0 { - for _, e := range m.Mounts { - l = e.Size() - n += 1 + l + sovDiff(uint64(l)) - } - } - if len(m.Payloads) > 0 { - for k, v := range m.Payloads { - _ = k - _ = v - l = 0 - if v != nil { - l = v.Size() - l += 1 + sovDiff(uint64(l)) - } - mapEntrySize := 1 + len(k) + sovDiff(uint64(len(k))) + l - n += mapEntrySize + 1 + sovDiff(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n + // Diff is the descriptor of the diff which can be applied + Diff *types.Descriptor `protobuf:"bytes,3,opt,name=diff,proto3" json:"diff,omitempty"` } -func (m *ApplyResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Applied != nil { - l = m.Applied.Size() - n += 1 + l + sovDiff(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) +func (x *DiffResponse) Reset() { + *x = DiffResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return n } -func (m *DiffRequest) Size() (n 
int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Left) > 0 { - for _, e := range m.Left { - l = e.Size() - n += 1 + l + sovDiff(uint64(l)) - } - } - if len(m.Right) > 0 { - for _, e := range m.Right { - l = e.Size() - n += 1 + l + sovDiff(uint64(l)) - } - } - l = len(m.MediaType) - if l > 0 { - n += 1 + l + sovDiff(uint64(l)) - } - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovDiff(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovDiff(uint64(len(k))) + 1 + len(v) + sovDiff(uint64(len(v))) - n += mapEntrySize + 1 + sovDiff(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n +func (x *DiffResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DiffResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Diff != nil { - l = m.Diff.Size() - n += 1 + l + sovDiff(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} +func (*DiffResponse) ProtoMessage() {} -func sovDiff(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozDiff(x uint64) (n int) { - return sovDiff(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ApplyRequest) String() string { - if this == nil { - return "nil" - } - repeatedStringForMounts := "[]*Mount{" - for _, f := range this.Mounts { - repeatedStringForMounts += strings.Replace(fmt.Sprintf("%v", f), "Mount", "types.Mount", 1) + "," - } - repeatedStringForMounts += "}" - keysForPayloads := make([]string, 0, len(this.Payloads)) - for k, _ := range this.Payloads { - keysForPayloads = append(keysForPayloads, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForPayloads) - mapStringForPayloads := "map[string]*types1.Any{" - for _, k := range keysForPayloads { - mapStringForPayloads += fmt.Sprintf("%v: %v,", k, this.Payloads[k]) - } - mapStringForPayloads += "}" - s := strings.Join([]string{`&ApplyRequest{`, - `Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "types.Descriptor", 1) + `,`, - `Mounts:` + repeatedStringForMounts + `,`, - `Payloads:` + mapStringForPayloads + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ApplyResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ApplyResponse{`, - `Applied:` + strings.Replace(fmt.Sprintf("%v", this.Applied), "Descriptor", "types.Descriptor", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *DiffRequest) String() string { - if this == nil { - return "nil" - } - repeatedStringForLeft := "[]*Mount{" - for _, f := range this.Left { - repeatedStringForLeft += strings.Replace(fmt.Sprintf("%v", f), "Mount", "types.Mount", 1) + "," - } - repeatedStringForLeft += "}" - repeatedStringForRight := "[]*Mount{" - for _, f := range this.Right { - repeatedStringForRight += strings.Replace(fmt.Sprintf("%v", f), "Mount", "types.Mount", 1) + "," - } - repeatedStringForRight += "}" - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := 
strings.Join([]string{`&DiffRequest{`, - `Left:` + repeatedStringForLeft + `,`, - `Right:` + repeatedStringForRight + `,`, - `MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`, - `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *DiffResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DiffResponse{`, - `Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "types.Descriptor", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringDiff(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *ApplyRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ApplyRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ApplyRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Diff", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDiff - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDiff - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Diff == nil { - m.Diff = &types.Descriptor{} - } - if err := m.Diff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDiff - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDiff - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Mounts = append(m.Mounts, &types.Mount{}) - if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Payloads", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDiff - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDiff - } - if postIndex > l { - return io.ErrUnexpectedEOF 
- } - if m.Payloads == nil { - m.Payloads = make(map[string]*types1.Any) - } - var mapkey string - var mapvalue *types1.Any - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthDiff - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthDiff - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthDiff - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthDiff - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &types1.Any{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipDiff(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDiff - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Payloads[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDiff(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDiff - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy +func (x *DiffResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil + return mi.MessageOf(x) } -func (m *ApplyResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ApplyResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ApplyResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Applied", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDiff - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDiff - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Applied == nil { - m.Applied = &types.Descriptor{} - } - if err := m.Applied.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDiff(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDiff - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil +// Deprecated: Use DiffResponse.ProtoReflect.Descriptor instead. 
+func (*DiffResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescGZIP(), []int{3} } -func (m *DiffRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DiffRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DiffRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Left", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDiff - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDiff - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Left = append(m.Left, &types.Mount{}) - if err := m.Left[len(m.Left)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Right", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDiff - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDiff - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Right = append(m.Right, &types.Mount{}) - if err := m.Right[len(m.Right)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthDiff - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthDiff - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MediaType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthDiff - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthDiff - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - 
m.Ref = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDiff - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDiff - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthDiff - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthDiff - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthDiff - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthDiff - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipDiff(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDiff - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDiff(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDiff - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *DiffResponse) GetDiff() *types.Descriptor { + if x != nil { + return x.Diff } return nil } -func (m *DiffResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DiffResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DiffResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Diff", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDiff - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDiff - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDiff - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Diff == nil { - m.Diff = &types.Descriptor{} - } - if err := m.Diff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDiff(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDiff - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipDiff(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDiff - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDiff - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDiff - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthDiff - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupDiff - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthDiff - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF +var File_github_com_containerd_containerd_api_services_diff_v1_diff_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDesc = []byte{ + 0x0a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x64, 0x69, 0x66, 0x66, 0x2f, 0x76, 0x31, 0x2f, 0x64, 0x69, 0x66, 0x66, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x1b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x64, 0x69, 0x66, 0x66, 0x2e, 0x76, 0x31, 0x1a, + 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x36, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x99, 0x02, 0x0a, 0x0c, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x30, 0x0a, 0x04, 0x64, 0x69, 0x66, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 
0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x64, + 0x69, 0x66, 0x66, 0x12, 0x2f, 0x0a, 0x06, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x06, 0x6d, 0x6f, + 0x75, 0x6e, 0x74, 0x73, 0x12, 0x53, 0x0a, 0x08, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x64, 0x69, 0x66, + 0x66, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x08, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x1a, 0x51, 0x0a, 0x0d, 0x50, 0x61, 0x79, + 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, + 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x47, 0x0a, 0x0d, + 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, + 0x07, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x07, 0x61, 0x70, + 0x70, 0x6c, 0x69, 0x65, 0x64, 0x22, 0xeb, 0x02, 0x0a, 0x0b, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x04, 0x6c, 0x65, 0x66, 0x74, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x04, 0x6c, 0x65, + 0x66, 0x74, 0x12, 0x2d, 0x0a, 0x05, 0x72, 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x05, 0x72, 0x69, 0x67, 0x68, + 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x10, 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, + 0x65, 0x66, 0x12, 0x4c, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x64, 0x69, 0x66, 0x66, 0x2e, 0x76, 0x31, + 0x2e, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x12, 0x46, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x65, 0x5f, + 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 
0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, + 0x61, 0x74, 0x65, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0x40, 0x0a, 0x0c, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x04, 0x64, 0x69, 0x66, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, + 0x04, 0x64, 0x69, 0x66, 0x66, 0x32, 0xc3, 0x01, 0x0a, 0x04, 0x44, 0x69, 0x66, 0x66, 0x12, 0x5e, + 0x0a, 0x05, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x12, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x64, 0x69, + 0x66, 0x66, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x64, 0x69, 0x66, 0x66, 0x2e, 0x76, 0x31, + 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5b, + 0x0a, 0x04, 0x44, 0x69, 0x66, 0x66, 0x12, 0x28, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x64, 0x69, 0x66, + 0x66, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x66, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x64, 0x69, 0x66, 0x66, 0x2e, 0x76, 0x31, 0x2e, 0x44, + 0x69, 0x66, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x3c, 0x5a, 0x3a, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x64, 0x69, 0x66, + 0x66, 0x2f, 0x76, 0x31, 0x3b, 0x64, 0x69, 0x66, 0x66, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( - ErrInvalidLengthDiff = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowDiff = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupDiff = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescData = file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescData) + 
}) + return file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_goTypes = []interface{}{ + (*ApplyRequest)(nil), // 0: containerd.services.diff.v1.ApplyRequest + (*ApplyResponse)(nil), // 1: containerd.services.diff.v1.ApplyResponse + (*DiffRequest)(nil), // 2: containerd.services.diff.v1.DiffRequest + (*DiffResponse)(nil), // 3: containerd.services.diff.v1.DiffResponse + nil, // 4: containerd.services.diff.v1.ApplyRequest.PayloadsEntry + nil, // 5: containerd.services.diff.v1.DiffRequest.LabelsEntry + (*types.Descriptor)(nil), // 6: containerd.types.Descriptor + (*types.Mount)(nil), // 7: containerd.types.Mount + (*timestamppb.Timestamp)(nil), // 8: google.protobuf.Timestamp + (*anypb.Any)(nil), // 9: google.protobuf.Any +} +var file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_depIdxs = []int32{ + 6, // 0: containerd.services.diff.v1.ApplyRequest.diff:type_name -> containerd.types.Descriptor + 7, // 1: containerd.services.diff.v1.ApplyRequest.mounts:type_name -> containerd.types.Mount + 4, // 2: containerd.services.diff.v1.ApplyRequest.payloads:type_name -> containerd.services.diff.v1.ApplyRequest.PayloadsEntry + 6, // 3: containerd.services.diff.v1.ApplyResponse.applied:type_name -> containerd.types.Descriptor + 7, // 4: containerd.services.diff.v1.DiffRequest.left:type_name -> containerd.types.Mount + 7, // 5: containerd.services.diff.v1.DiffRequest.right:type_name -> containerd.types.Mount + 5, // 6: containerd.services.diff.v1.DiffRequest.labels:type_name -> containerd.services.diff.v1.DiffRequest.LabelsEntry + 8, // 7: containerd.services.diff.v1.DiffRequest.source_date_epoch:type_name -> google.protobuf.Timestamp + 6, // 8: containerd.services.diff.v1.DiffResponse.diff:type_name -> containerd.types.Descriptor + 9, // 9: containerd.services.diff.v1.ApplyRequest.PayloadsEntry.value:type_name -> google.protobuf.Any + 0, // 10: containerd.services.diff.v1.Diff.Apply:input_type -> containerd.services.diff.v1.ApplyRequest + 2, // 11: containerd.services.diff.v1.Diff.Diff:input_type -> containerd.services.diff.v1.DiffRequest + 1, // 12: containerd.services.diff.v1.Diff.Apply:output_type -> containerd.services.diff.v1.ApplyResponse + 3, // 13: containerd.services.diff.v1.Diff.Diff:output_type -> containerd.services.diff.v1.DiffResponse + 12, // [12:14] is the sub-list for method output_type + 10, // [10:12] is the sub-list for method input_type + 10, // [10:10] is the sub-list for extension type_name + 10, // [10:10] is the sub-list for extension extendee + 0, // [0:10] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_init() } +func file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_init() { + if File_github_com_containerd_containerd_api_services_diff_v1_diff_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApplyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + 
switch v := v.(*ApplyResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DiffRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DiffResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDesc, + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_services_diff_v1_diff_proto = out.File + file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_rawDesc = nil + file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_goTypes = nil + file_github_com_containerd_containerd_api_services_diff_v1_diff_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto index cc24e3f2caa02..7191a562e28b5 100644 --- a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto +++ b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto @@ -18,8 +18,8 @@ syntax = "proto3"; package containerd.services.diff.v1; -import weak "gogoproto/gogo.proto"; import "google/protobuf/any.proto"; +import "google/protobuf/timestamp.proto"; import "github.com/containerd/containerd/api/types/mount.proto"; import "github.com/containerd/containerd/api/types/descriptor.proto"; @@ -73,6 +73,10 @@ message DiffRequest { // Labels are the labels to apply to the generated content // on content store commit. map labels = 5; + + // SourceDateEpoch specifies the timestamp used for whiteouts to provide control for reproducibility. + // See also https://reproducible-builds.org/docs/source-date-epoch/ . + google.protobuf.Timestamp source_date_epoch = 6; } message DiffResponse { diff --git a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff_grpc.pb.go new file mode 100644 index 0000000000000..daa3b1801ac44 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff_grpc.pb.go @@ -0,0 +1,151 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/diff/v1/diff.proto + +package diff + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// DiffClient is the client API for Diff service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type DiffClient interface { + // Apply applies the content associated with the provided digests onto + // the provided mounts. Archive content will be extracted and + // decompressed if necessary. + Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.CallOption) (*ApplyResponse, error) + // Diff creates a diff between the given mounts and uploads the result + // to the content store. + Diff(ctx context.Context, in *DiffRequest, opts ...grpc.CallOption) (*DiffResponse, error) +} + +type diffClient struct { + cc grpc.ClientConnInterface +} + +func NewDiffClient(cc grpc.ClientConnInterface) DiffClient { + return &diffClient{cc} +} + +func (c *diffClient) Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.CallOption) (*ApplyResponse, error) { + out := new(ApplyResponse) + err := c.cc.Invoke(ctx, "/containerd.services.diff.v1.Diff/Apply", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *diffClient) Diff(ctx context.Context, in *DiffRequest, opts ...grpc.CallOption) (*DiffResponse, error) { + out := new(DiffResponse) + err := c.cc.Invoke(ctx, "/containerd.services.diff.v1.Diff/Diff", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// DiffServer is the server API for Diff service. +// All implementations must embed UnimplementedDiffServer +// for forward compatibility +type DiffServer interface { + // Apply applies the content associated with the provided digests onto + // the provided mounts. Archive content will be extracted and + // decompressed if necessary. + Apply(context.Context, *ApplyRequest) (*ApplyResponse, error) + // Diff creates a diff between the given mounts and uploads the result + // to the content store. + Diff(context.Context, *DiffRequest) (*DiffResponse, error) + mustEmbedUnimplementedDiffServer() +} + +// UnimplementedDiffServer must be embedded to have forward compatible implementations. +type UnimplementedDiffServer struct { +} + +func (UnimplementedDiffServer) Apply(context.Context, *ApplyRequest) (*ApplyResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Apply not implemented") +} +func (UnimplementedDiffServer) Diff(context.Context, *DiffRequest) (*DiffResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Diff not implemented") +} +func (UnimplementedDiffServer) mustEmbedUnimplementedDiffServer() {} + +// UnsafeDiffServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to DiffServer will +// result in compilation errors. 
+type UnsafeDiffServer interface { + mustEmbedUnimplementedDiffServer() +} + +func RegisterDiffServer(s grpc.ServiceRegistrar, srv DiffServer) { + s.RegisterService(&Diff_ServiceDesc, srv) +} + +func _Diff_Apply_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ApplyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DiffServer).Apply(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.diff.v1.Diff/Apply", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DiffServer).Apply(ctx, req.(*ApplyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Diff_Diff_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DiffRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DiffServer).Diff(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.diff.v1.Diff/Diff", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DiffServer).Diff(ctx, req.(*DiffRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Diff_ServiceDesc is the grpc.ServiceDesc for Diff service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Diff_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.diff.v1.Diff", + HandlerType: (*DiffServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Apply", + Handler: _Diff_Apply_Handler, + }, + { + MethodName: "Diff", + Handler: _Diff_Diff_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/containerd/containerd/api/services/diff/v1/diff.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go b/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go index 4373f3bf2fc6d..f083d9fce29c1 100644 --- a/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go @@ -1,1372 +1,443 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/services/events/v1/events.proto package events import ( - context "context" - fmt "fmt" - github_com_containerd_typeurl "github.com/containerd/typeurl" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types "github.com/gogo/protobuf/types" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" + _ "github.com/containerd/containerd/protobuf/plugin" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + emptypb "google.golang.org/protobuf/types/known/emptypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - strings "strings" - time "time" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type PublishRequest struct { - Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` - Event *types.Any `protobuf:"bytes,2,opt,name=event,proto3" json:"event,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *PublishRequest) Reset() { *m = PublishRequest{} } -func (*PublishRequest) ProtoMessage() {} -func (*PublishRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_43fcd20dc1642376, []int{0} -} -func (m *PublishRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PublishRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PublishRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PublishRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PublishRequest.Merge(m, src) -} -func (m *PublishRequest) XXX_Size() int { - return m.Size() -} -func (m *PublishRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PublishRequest.DiscardUnknown(m) + Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` + Event *anypb.Any `protobuf:"bytes,2,opt,name=event,proto3" json:"event,omitempty"` } -var xxx_messageInfo_PublishRequest proto.InternalMessageInfo - -type ForwardRequest struct { - Envelope *Envelope `protobuf:"bytes,1,opt,name=envelope,proto3" json:"envelope,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ForwardRequest) Reset() { *m = 
ForwardRequest{} } -func (*ForwardRequest) ProtoMessage() {} -func (*ForwardRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_43fcd20dc1642376, []int{1} -} -func (m *ForwardRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ForwardRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ForwardRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *PublishRequest) Reset() { + *x = PublishRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ForwardRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ForwardRequest.Merge(m, src) -} -func (m *ForwardRequest) XXX_Size() int { - return m.Size() -} -func (m *ForwardRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ForwardRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ForwardRequest proto.InternalMessageInfo -type SubscribeRequest struct { - Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *PublishRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *SubscribeRequest) Reset() { *m = SubscribeRequest{} } -func (*SubscribeRequest) ProtoMessage() {} -func (*SubscribeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_43fcd20dc1642376, []int{2} -} -func (m *SubscribeRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SubscribeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SubscribeRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SubscribeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SubscribeRequest.Merge(m, src) -} -func (m *SubscribeRequest) XXX_Size() int { - return m.Size() -} -func (m *SubscribeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SubscribeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SubscribeRequest proto.InternalMessageInfo - -type Envelope struct { - Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"timestamp"` - Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` - Topic string `protobuf:"bytes,3,opt,name=topic,proto3" json:"topic,omitempty"` - Event *types.Any `protobuf:"bytes,4,opt,name=event,proto3" json:"event,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Envelope) Reset() { *m = Envelope{} } -func (*Envelope) ProtoMessage() {} -func (*Envelope) Descriptor() ([]byte, []int) { - return fileDescriptor_43fcd20dc1642376, []int{3} -} -func (m *Envelope) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Envelope) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Envelope.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Envelope) XXX_Merge(src 
proto.Message) { - xxx_messageInfo_Envelope.Merge(m, src) -} -func (m *Envelope) XXX_Size() int { - return m.Size() -} -func (m *Envelope) XXX_DiscardUnknown() { - xxx_messageInfo_Envelope.DiscardUnknown(m) -} - -var xxx_messageInfo_Envelope proto.InternalMessageInfo - -func init() { - proto.RegisterType((*PublishRequest)(nil), "containerd.services.events.v1.PublishRequest") - proto.RegisterType((*ForwardRequest)(nil), "containerd.services.events.v1.ForwardRequest") - proto.RegisterType((*SubscribeRequest)(nil), "containerd.services.events.v1.SubscribeRequest") - proto.RegisterType((*Envelope)(nil), "containerd.services.events.v1.Envelope") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/events.proto", fileDescriptor_43fcd20dc1642376) -} - -var fileDescriptor_43fcd20dc1642376 = []byte{ - // 466 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcd, 0x8e, 0xd3, 0x30, - 0x14, 0x85, 0xeb, 0xf9, 0x6d, 0x3c, 0xd2, 0x08, 0x45, 0x15, 0x2a, 0x01, 0xd2, 0xaa, 0x1b, 0x2a, - 0x04, 0x0e, 0x53, 0x76, 0x20, 0x21, 0x28, 0x94, 0xf5, 0x28, 0x80, 0x54, 0xb1, 0x4b, 0xd2, 0xdb, - 0xd4, 0x52, 0x62, 0x9b, 0xd8, 0x09, 0x9a, 0xdd, 0x3c, 0x02, 0x1b, 0xde, 0x84, 0x0d, 0x6f, 0xd0, - 0x25, 0x4b, 0x56, 0xc0, 0xf4, 0x49, 0x50, 0x13, 0xbb, 0x61, 0x3a, 0x40, 0x10, 0xbb, 0x6b, 0xdf, - 0xe3, 0xcf, 0xb9, 0xe7, 0x38, 0xf8, 0x45, 0x4c, 0xd5, 0x22, 0x0f, 0x49, 0xc4, 0x53, 0x2f, 0xe2, - 0x4c, 0x05, 0x94, 0x41, 0x36, 0xfb, 0xb5, 0x0c, 0x04, 0xf5, 0x24, 0x64, 0x05, 0x8d, 0x40, 0x7a, - 0x50, 0x00, 0x53, 0xd2, 0x2b, 0x4e, 0x74, 0x45, 0x44, 0xc6, 0x15, 0xb7, 0x6f, 0xd7, 0x7a, 0x62, - 0xb4, 0x44, 0x2b, 0x8a, 0x13, 0xe7, 0x69, 0xe3, 0x25, 0x25, 0x26, 0xcc, 0xe7, 0x9e, 0x48, 0xf2, - 0x98, 0x32, 0x6f, 0x4e, 0x21, 0x99, 0x89, 0x40, 0x2d, 0xaa, 0x0b, 0x9c, 0x4e, 0xcc, 0x63, 0x5e, - 0x96, 0xde, 0xba, 0xd2, 0xbb, 0x37, 0x62, 0xce, 0xe3, 0x04, 0xea, 0xd3, 0x01, 0x3b, 0xd3, 0xad, - 0x9b, 0xdb, 0x2d, 0x48, 0x85, 0x32, 0xcd, 0xde, 0x76, 0x53, 0xd1, 0x14, 0xa4, 0x0a, 0x52, 0x51, - 0x09, 0x06, 0x3e, 0x3e, 0x3e, 0xcd, 0xc3, 0x84, 0xca, 0x85, 0x0f, 0xef, 0x72, 0x90, 0xca, 0xee, - 0xe0, 0x7d, 0xc5, 0x05, 0x8d, 0xba, 0xa8, 0x8f, 0x86, 0x96, 0x5f, 0x2d, 0xec, 0xbb, 0x78, 0xbf, - 0x9c, 0xb2, 0xbb, 0xd3, 0x47, 0xc3, 0xa3, 0x51, 0x87, 0x54, 0x60, 0x62, 0xc0, 0xe4, 0x19, 0x3b, - 0xf3, 0x2b, 0xc9, 0xe0, 0x0d, 0x3e, 0x7e, 0xc9, 0xb3, 0xf7, 0x41, 0x36, 0x33, 0xcc, 0xe7, 0xb8, - 0x0d, 0xac, 0x80, 0x84, 0x0b, 0x28, 0xb1, 0x47, 0xa3, 0x3b, 0xe4, 0xaf, 0x46, 0x92, 0x89, 0x96, - 0xfb, 0x9b, 0x83, 0x83, 0x7b, 0xf8, 0xda, 0xab, 0x3c, 0x94, 0x51, 0x46, 0x43, 0x30, 0xe0, 0x2e, - 0x3e, 0x9c, 0xd3, 0x44, 0x41, 0x26, 0xbb, 0xa8, 0xbf, 0x3b, 0xb4, 0x7c, 0xb3, 0x1c, 0x7c, 0x42, - 0xb8, 0x6d, 0x20, 0xf6, 0x18, 0x5b, 0x9b, 0xc1, 0xf5, 0x07, 0x38, 0x57, 0x26, 0x78, 0x6d, 0x14, - 0xe3, 0xf6, 0xf2, 0x5b, 0xaf, 0xf5, 0xe1, 0x7b, 0x0f, 0xf9, 0xf5, 0x31, 0xfb, 0x16, 0xb6, 0x58, - 0x90, 0x82, 0x14, 0x41, 0x04, 0xa5, 0x0b, 0x96, 0x5f, 0x6f, 0xd4, 0xae, 0xed, 0xfe, 0xd6, 0xb5, - 0xbd, 0x46, 0xd7, 0x1e, 0xed, 0x9d, 0x7f, 0xee, 0xa1, 0xd1, 0xc7, 0x1d, 0x7c, 0x30, 0x29, 0x5d, - 0xb0, 0x4f, 0xf1, 0xa1, 0x8e, 0xc6, 0xbe, 0xdf, 0xe0, 0xd6, 0xe5, 0x08, 0x9d, 0xeb, 0x57, 0xee, - 0x99, 0xac, 0xdf, 0xc4, 0x9a, 0xa8, 0x83, 0x69, 0x24, 0x5e, 0x0e, 0xf0, 0x8f, 0xc4, 0x18, 0x5b, - 0x9b, 0x4c, 0x6c, 0xaf, 0x81, 0xb9, 0x9d, 0x9e, 0xf3, 0xaf, 0x8f, 0xe0, 0x01, 0x1a, 0x4f, 0x97, - 0x17, 0x6e, 0xeb, 0xeb, 0x85, 0xdb, 0x3a, 0x5f, 0xb9, 0x68, 0xb9, 0x72, 0xd1, 0x97, 0x95, 0x8b, - 0x7e, 0xac, 
0x5c, 0xf4, 0xf6, 0xc9, 0x7f, 0xfe, 0xd7, 0x8f, 0xab, 0x6a, 0xda, 0x9a, 0xa2, 0xf0, - 0xa0, 0x1c, 0xeb, 0xe1, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe6, 0xbf, 0x19, 0xa6, 0x24, 0x04, - 0x00, 0x00, -} - -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *Envelope) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - // unhandled: timestamp - case "namespace": - return string(m.Namespace), len(m.Namespace) > 0 - case "topic": - return string(m.Topic), len(m.Topic) > 0 - case "event": - decoded, err := github_com_containerd_typeurl.UnmarshalAny(m.Event) - if err != nil { - return "", false - } +func (*PublishRequest) ProtoMessage() {} - adaptor, ok := decoded.(interface{ Field([]string) (string, bool) }) - if !ok { - return "", false +func (x *PublishRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return adaptor.Field(fieldpath[1:]) + return ms } - return "", false -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// EventsClient is the client API for Events service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type EventsClient interface { - // Publish an event to a topic. - // - // The event will be packed into a timestamp envelope with the namespace - // introspected from the context. The envelope will then be dispatched. - Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*types.Empty, error) - // Forward sends an event that has already been packaged into an envelope - // with a timestamp and namespace. - // - // This is useful if earlier timestamping is required or when forwarding on - // behalf of another component, namespace or publisher. - Forward(ctx context.Context, in *ForwardRequest, opts ...grpc.CallOption) (*types.Empty, error) - // Subscribe to a stream of events, possibly returning only that match any - // of the provided filters. - // - // Unlike many other methods in containerd, subscribers will get messages - // from all namespaces unless otherwise specified. If this is not desired, - // a filter can be provided in the format 'namespace==' to - // restrict the received events. - Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (Events_SubscribeClient, error) -} - -type eventsClient struct { - cc *grpc.ClientConn + return mi.MessageOf(x) } -func NewEventsClient(cc *grpc.ClientConn) EventsClient { - return &eventsClient{cc} -} - -func (c *eventsClient) Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*types.Empty, error) { - out := new(types.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.events.v1.Events/Publish", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +// Deprecated: Use PublishRequest.ProtoReflect.Descriptor instead. 
+func (*PublishRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDescGZIP(), []int{0} } -func (c *eventsClient) Forward(ctx context.Context, in *ForwardRequest, opts ...grpc.CallOption) (*types.Empty, error) { - out := new(types.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.events.v1.Events/Forward", in, out, opts...) - if err != nil { - return nil, err +func (x *PublishRequest) GetTopic() string { + if x != nil { + return x.Topic } - return out, nil + return "" } -func (c *eventsClient) Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (Events_SubscribeClient, error) { - stream, err := c.cc.NewStream(ctx, &_Events_serviceDesc.Streams[0], "/containerd.services.events.v1.Events/Subscribe", opts...) - if err != nil { - return nil, err +func (x *PublishRequest) GetEvent() *anypb.Any { + if x != nil { + return x.Event } - x := &eventsSubscribeClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil + return nil } -type Events_SubscribeClient interface { - Recv() (*Envelope, error) - grpc.ClientStream -} +type ForwardRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -type eventsSubscribeClient struct { - grpc.ClientStream + Envelope *Envelope `protobuf:"bytes,1,opt,name=envelope,proto3" json:"envelope,omitempty"` } -func (x *eventsSubscribeClient) Recv() (*Envelope, error) { - m := new(Envelope) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err +func (x *ForwardRequest) Reset() { + *x = ForwardRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return m, nil } -// EventsServer is the server API for Events service. -type EventsServer interface { - // Publish an event to a topic. - // - // The event will be packed into a timestamp envelope with the namespace - // introspected from the context. The envelope will then be dispatched. - Publish(context.Context, *PublishRequest) (*types.Empty, error) - // Forward sends an event that has already been packaged into an envelope - // with a timestamp and namespace. - // - // This is useful if earlier timestamping is required or when forwarding on - // behalf of another component, namespace or publisher. - Forward(context.Context, *ForwardRequest) (*types.Empty, error) - // Subscribe to a stream of events, possibly returning only that match any - // of the provided filters. - // - // Unlike many other methods in containerd, subscribers will get messages - // from all namespaces unless otherwise specified. If this is not desired, - // a filter can be provided in the format 'namespace==' to - // restrict the received events. - Subscribe(*SubscribeRequest, Events_SubscribeServer) error +func (x *ForwardRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -// UnimplementedEventsServer can be embedded to have forward compatible implementations. 
-type UnimplementedEventsServer struct { -} - -func (*UnimplementedEventsServer) Publish(ctx context.Context, req *PublishRequest) (*types.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Publish not implemented") -} -func (*UnimplementedEventsServer) Forward(ctx context.Context, req *ForwardRequest) (*types.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Forward not implemented") -} -func (*UnimplementedEventsServer) Subscribe(req *SubscribeRequest, srv Events_SubscribeServer) error { - return status.Errorf(codes.Unimplemented, "method Subscribe not implemented") -} - -func RegisterEventsServer(s *grpc.Server, srv EventsServer) { - s.RegisterService(&_Events_serviceDesc, srv) -} +func (*ForwardRequest) ProtoMessage() {} -func _Events_Publish_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PublishRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(EventsServer).Publish(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.events.v1.Events/Publish", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(EventsServer).Publish(ctx, req.(*PublishRequest)) +func (x *ForwardRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return interceptor(ctx, in, info, handler) + return mi.MessageOf(x) } -func _Events_Forward_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ForwardRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(EventsServer).Forward(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.events.v1.Events/Forward", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(EventsServer).Forward(ctx, req.(*ForwardRequest)) - } - return interceptor(ctx, in, info, handler) +// Deprecated: Use ForwardRequest.ProtoReflect.Descriptor instead. 
+func (*ForwardRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDescGZIP(), []int{1} } -func _Events_Subscribe_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(SubscribeRequest) - if err := stream.RecvMsg(m); err != nil { - return err +func (x *ForwardRequest) GetEnvelope() *Envelope { + if x != nil { + return x.Envelope } - return srv.(EventsServer).Subscribe(m, &eventsSubscribeServer{stream}) -} - -type Events_SubscribeServer interface { - Send(*Envelope) error - grpc.ServerStream -} - -type eventsSubscribeServer struct { - grpc.ServerStream + return nil } -func (x *eventsSubscribeServer) Send(m *Envelope) error { - return x.ServerStream.SendMsg(m) -} +type SubscribeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -var _Events_serviceDesc = grpc.ServiceDesc{ - ServiceName: "containerd.services.events.v1.Events", - HandlerType: (*EventsServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Publish", - Handler: _Events_Publish_Handler, - }, - { - MethodName: "Forward", - Handler: _Events_Forward_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "Subscribe", - Handler: _Events_Subscribe_Handler, - ServerStreams: true, - }, - }, - Metadata: "github.com/containerd/containerd/api/services/events/v1/events.proto", + Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` } -func (m *PublishRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *SubscribeRequest) Reset() { + *x = SubscribeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *PublishRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *SubscribeRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *PublishRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Event != nil { - { - size, err := m.Event.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintEvents(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Topic) > 0 { - i -= len(m.Topic) - copy(dAtA[i:], m.Topic) - i = encodeVarintEvents(dAtA, i, uint64(len(m.Topic))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} +func (*SubscribeRequest) ProtoMessage() {} -func (m *ForwardRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *SubscribeRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *ForwardRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - 
return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use SubscribeRequest.ProtoReflect.Descriptor instead. +func (*SubscribeRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDescGZIP(), []int{2} } -func (m *ForwardRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Envelope != nil { - { - size, err := m.Envelope.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintEvents(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa +func (x *SubscribeRequest) GetFilters() []string { + if x != nil { + return x.Filters } - return len(dAtA) - i, nil + return nil } -func (m *SubscribeRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} +type Envelope struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *SubscribeRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Topic string `protobuf:"bytes,3,opt,name=topic,proto3" json:"topic,omitempty"` + Event *anypb.Any `protobuf:"bytes,4,opt,name=event,proto3" json:"event,omitempty"` } -func (m *SubscribeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) +func (x *Envelope) Reset() { + *x = Envelope{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - if len(m.Filters) > 0 { - for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Filters[iNdEx]) - copy(dAtA[i:], m.Filters[iNdEx]) - i = encodeVarintEvents(dAtA, i, uint64(len(m.Filters[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil } -func (m *Envelope) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (x *Envelope) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Envelope) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} +func (*Envelope) ProtoMessage() {} -func (m *Envelope) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Event != nil { - { - size, err := m.Event.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintEvents(dAtA, i, uint64(size)) +func (x *Envelope) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - i-- - dAtA[i] = 0x22 - } - if len(m.Topic) > 0 { - i -= len(m.Topic) - copy(dAtA[i:], m.Topic) - i = encodeVarintEvents(dAtA, i, uint64(len(m.Topic))) - i-- - dAtA[i] = 0x1a - } - if len(m.Namespace) > 0 { - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintEvents(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x12 - } - n4, err4 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err4 != nil { - return 0, err4 - } - i -= n4 - i = encodeVarintEvents(dAtA, i, uint64(n4)) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintEvents(dAtA []byte, offset int, v uint64) int { - offset -= sovEvents(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ + return ms } - dAtA[offset] = uint8(v) - return base -} -func (m *PublishRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Topic) - if l > 0 { - n += 1 + l + sovEvents(uint64(l)) - } - if m.Event != nil { - l = m.Event.Size() - n += 1 + l + sovEvents(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n + return mi.MessageOf(x) } -func (m *ForwardRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Envelope != nil { - l = m.Envelope.Size() - n += 1 + l + sovEvents(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n +// Deprecated: Use Envelope.ProtoReflect.Descriptor instead. +func (*Envelope) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDescGZIP(), []int{3} } -func (m *SubscribeRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Filters) > 0 { - for _, s := range m.Filters { - l = len(s) - n += 1 + l + sovEvents(uint64(l)) - } +func (x *Envelope) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n + return nil } -func (m *Envelope) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) - n += 1 + l + sovEvents(uint64(l)) - l = len(m.Namespace) - if l > 0 { - n += 1 + l + sovEvents(uint64(l)) - } - l = len(m.Topic) - if l > 0 { - n += 1 + l + sovEvents(uint64(l)) - } - if m.Event != nil { - l = m.Event.Size() - n += 1 + l + sovEvents(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) +func (x *Envelope) GetNamespace() string { + if x != nil { + return x.Namespace } - return n + return "" } -func sovEvents(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozEvents(x uint64) (n int) { - return sovEvents(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *PublishRequest) String() string { - if this == nil { - return "nil" +func (x *Envelope) GetTopic() string { + if x != nil { + return x.Topic } - s := strings.Join([]string{`&PublishRequest{`, - `Topic:` + fmt.Sprintf("%v", this.Topic) + `,`, - `Event:` + strings.Replace(fmt.Sprintf("%v", this.Event), "Any", "types.Any", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s + return "" } -func (this *ForwardRequest) String() string { - if this == nil { - return "nil" - } - s 
:= strings.Join([]string{`&ForwardRequest{`, - `Envelope:` + strings.Replace(this.Envelope.String(), "Envelope", "Envelope", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *SubscribeRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SubscribeRequest{`, - `Filters:` + fmt.Sprintf("%v", this.Filters) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *Envelope) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Envelope{`, - `Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Topic:` + fmt.Sprintf("%v", this.Topic) + `,`, - `Event:` + strings.Replace(fmt.Sprintf("%v", this.Event), "Any", "types.Any", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringEvents(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *PublishRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PublishRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PublishRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEvents - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEvents - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Topic = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvents - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvents - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Event == nil { - m.Event = &types.Any{} - } - if err := m.Event.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipEvents(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEvents - } - if 
(iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *Envelope) GetEvent() *anypb.Any { + if x != nil { + return x.Event } return nil } -func (m *ForwardRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ForwardRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ForwardRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Envelope", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvents - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvents - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Envelope == nil { - m.Envelope = &Envelope{} - } - if err := m.Envelope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipEvents(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEvents - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil +var File_github_com_containerd_containerd_api_services_events_v1_events_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDesc = []byte{ + 0x0a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x70, 0x61, 0x74, + 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x52, 0x0a, 0x0e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x22, 0x55, 0x0a, 0x0e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x43, 0x0a, 0x08, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, + 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, + 0x65, 0x52, 0x08, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x22, 0x2c, 0x0a, 0x10, 0x53, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x18, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x22, 0xaa, 0x01, 0x0a, 0x08, 0x45, 0x6e, + 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 
0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, + 0x6f, 0x70, 0x69, 0x63, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x3a, 0x04, 0x80, 0xb9, 0x1f, 0x01, 0x32, 0x95, 0x02, 0x0a, 0x06, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x73, 0x12, 0x50, 0x0a, 0x07, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x12, 0x2d, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, + 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x12, 0x50, 0x0a, 0x07, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x12, 0x2d, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x46, + 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x67, 0x0a, 0x09, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x62, 0x65, 0x12, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x30, 0x01, 0x42, 0x40, + 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x76, 0x31, 0x3b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func (m *SubscribeRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SubscribeRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SubscribeRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum 
{ - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEvents - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEvents - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipEvents(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEvents - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Envelope) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break +var ( + file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDescData = file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDesc +) + +func file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_github_com_containerd_containerd_api_services_events_v1_events_proto_goTypes = []interface{}{ + (*PublishRequest)(nil), // 0: containerd.services.events.v1.PublishRequest + (*ForwardRequest)(nil), // 1: containerd.services.events.v1.ForwardRequest + (*SubscribeRequest)(nil), // 2: containerd.services.events.v1.SubscribeRequest + (*Envelope)(nil), // 3: containerd.services.events.v1.Envelope + (*anypb.Any)(nil), // 4: google.protobuf.Any + (*timestamppb.Timestamp)(nil), // 5: google.protobuf.Timestamp + (*emptypb.Empty)(nil), // 6: google.protobuf.Empty +} +var file_github_com_containerd_containerd_api_services_events_v1_events_proto_depIdxs = []int32{ + 4, // 0: containerd.services.events.v1.PublishRequest.event:type_name -> google.protobuf.Any + 3, // 1: containerd.services.events.v1.ForwardRequest.envelope:type_name -> containerd.services.events.v1.Envelope + 5, // 2: containerd.services.events.v1.Envelope.timestamp:type_name -> google.protobuf.Timestamp + 4, // 3: containerd.services.events.v1.Envelope.event:type_name -> google.protobuf.Any + 0, // 4: containerd.services.events.v1.Events.Publish:input_type -> 
containerd.services.events.v1.PublishRequest + 1, // 5: containerd.services.events.v1.Events.Forward:input_type -> containerd.services.events.v1.ForwardRequest + 2, // 6: containerd.services.events.v1.Events.Subscribe:input_type -> containerd.services.events.v1.SubscribeRequest + 6, // 7: containerd.services.events.v1.Events.Publish:output_type -> google.protobuf.Empty + 6, // 8: containerd.services.events.v1.Events.Forward:output_type -> google.protobuf.Empty + 3, // 9: containerd.services.events.v1.Events.Subscribe:output_type -> containerd.services.events.v1.Envelope + 7, // [7:10] is the sub-list for method output_type + 4, // [4:7] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_services_events_v1_events_proto_init() } +func file_github_com_containerd_containerd_api_services_events_v1_events_proto_init() { + if File_github_com_containerd_containerd_api_services_events_v1_events_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublishRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Envelope: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Envelope: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvents - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvents - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ForwardRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEvents - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEvents - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Topic", 
wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEvents - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEvents - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Topic = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvents - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvents - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Event == nil { - m.Event = &types.Any{} - } - if err := m.Event.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipEvents(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEvents - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipEvents(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEvents - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SubscribeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEvents - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEvents - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } + file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Envelope); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } - if length < 0 { - return 0, ErrInvalidLengthEvents - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupEvents - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, 
ErrInvalidLengthEvents - } - if depth == 0 { - return iNdEx, nil } } - return 0, io.ErrUnexpectedEOF + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_github_com_containerd_containerd_api_services_events_v1_events_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_services_events_v1_events_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_services_events_v1_events_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_services_events_v1_events_proto = out.File + file_github_com_containerd_containerd_api_services_events_v1_events_proto_rawDesc = nil + file_github_com_containerd_containerd_api_services_events_v1_events_proto_goTypes = nil + file_github_com_containerd_containerd_api_services_events_v1_events_proto_depIdxs = nil } - -var ( - ErrInvalidLengthEvents = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowEvents = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupEvents = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/events.proto b/vendor/github.com/containerd/containerd/api/services/events/v1/events.proto index 9a2444e54ca10..3e0f11ffb8409 100644 --- a/vendor/github.com/containerd/containerd/api/services/events/v1/events.proto +++ b/vendor/github.com/containerd/containerd/api/services/events/v1/events.proto @@ -18,8 +18,7 @@ syntax = "proto3"; package containerd.services.events.v1; -import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; -import weak "gogoproto/gogo.proto"; +import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; import "google/protobuf/any.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/timestamp.proto"; @@ -65,7 +64,7 @@ message SubscribeRequest { message Envelope { option (containerd.plugin.fieldpath) = true; - google.protobuf.Timestamp timestamp = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp timestamp = 1; string namespace = 2; string topic = 3; google.protobuf.Any event = 4; diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/events_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/events/v1/events_grpc.pb.go new file mode 100644 index 0000000000000..768306555cf11 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/events/v1/events_grpc.pb.go @@ -0,0 +1,238 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/events/v1/events.proto + +package events + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// EventsClient is the client API for Events service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type EventsClient interface { + // Publish an event to a topic. + // + // The event will be packed into a timestamp envelope with the namespace + // introspected from the context. The envelope will then be dispatched. + Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Forward sends an event that has already been packaged into an envelope + // with a timestamp and namespace. + // + // This is useful if earlier timestamping is required or when forwarding on + // behalf of another component, namespace or publisher. + Forward(ctx context.Context, in *ForwardRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Subscribe to a stream of events, possibly returning only that match any + // of the provided filters. + // + // Unlike many other methods in containerd, subscribers will get messages + // from all namespaces unless otherwise specified. If this is not desired, + // a filter can be provided in the format 'namespace==' to + // restrict the received events. + Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (Events_SubscribeClient, error) +} + +type eventsClient struct { + cc grpc.ClientConnInterface +} + +func NewEventsClient(cc grpc.ClientConnInterface) EventsClient { + return &eventsClient{cc} +} + +func (c *eventsClient) Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.events.v1.Events/Publish", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *eventsClient) Forward(ctx context.Context, in *ForwardRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.events.v1.Events/Forward", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *eventsClient) Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (Events_SubscribeClient, error) { + stream, err := c.cc.NewStream(ctx, &Events_ServiceDesc.Streams[0], "/containerd.services.events.v1.Events/Subscribe", opts...) + if err != nil { + return nil, err + } + x := &eventsSubscribeClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Events_SubscribeClient interface { + Recv() (*Envelope, error) + grpc.ClientStream +} + +type eventsSubscribeClient struct { + grpc.ClientStream +} + +func (x *eventsSubscribeClient) Recv() (*Envelope, error) { + m := new(Envelope) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// EventsServer is the server API for Events service. +// All implementations must embed UnimplementedEventsServer +// for forward compatibility +type EventsServer interface { + // Publish an event to a topic. + // + // The event will be packed into a timestamp envelope with the namespace + // introspected from the context. The envelope will then be dispatched. + Publish(context.Context, *PublishRequest) (*emptypb.Empty, error) + // Forward sends an event that has already been packaged into an envelope + // with a timestamp and namespace. 
+ // + // This is useful if earlier timestamping is required or when forwarding on + // behalf of another component, namespace or publisher. + Forward(context.Context, *ForwardRequest) (*emptypb.Empty, error) + // Subscribe to a stream of events, possibly returning only that match any + // of the provided filters. + // + // Unlike many other methods in containerd, subscribers will get messages + // from all namespaces unless otherwise specified. If this is not desired, + // a filter can be provided in the format 'namespace==' to + // restrict the received events. + Subscribe(*SubscribeRequest, Events_SubscribeServer) error + mustEmbedUnimplementedEventsServer() +} + +// UnimplementedEventsServer must be embedded to have forward compatible implementations. +type UnimplementedEventsServer struct { +} + +func (UnimplementedEventsServer) Publish(context.Context, *PublishRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Publish not implemented") +} +func (UnimplementedEventsServer) Forward(context.Context, *ForwardRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Forward not implemented") +} +func (UnimplementedEventsServer) Subscribe(*SubscribeRequest, Events_SubscribeServer) error { + return status.Errorf(codes.Unimplemented, "method Subscribe not implemented") +} +func (UnimplementedEventsServer) mustEmbedUnimplementedEventsServer() {} + +// UnsafeEventsServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to EventsServer will +// result in compilation errors. +type UnsafeEventsServer interface { + mustEmbedUnimplementedEventsServer() +} + +func RegisterEventsServer(s grpc.ServiceRegistrar, srv EventsServer) { + s.RegisterService(&Events_ServiceDesc, srv) +} + +func _Events_Publish_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PublishRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EventsServer).Publish(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.events.v1.Events/Publish", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EventsServer).Publish(ctx, req.(*PublishRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Events_Forward_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ForwardRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(EventsServer).Forward(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.events.v1.Events/Forward", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(EventsServer).Forward(ctx, req.(*ForwardRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Events_Subscribe_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SubscribeRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(EventsServer).Subscribe(m, &eventsSubscribeServer{stream}) +} + +type Events_SubscribeServer interface { + Send(*Envelope) error + grpc.ServerStream +} + +type eventsSubscribeServer struct { + grpc.ServerStream +} + +func (x 
*eventsSubscribeServer) Send(m *Envelope) error { + return x.ServerStream.SendMsg(m) +} + +// Events_ServiceDesc is the grpc.ServiceDesc for Events service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Events_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.events.v1.Events", + HandlerType: (*EventsServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Publish", + Handler: _Events_Publish_Handler, + }, + { + MethodName: "Forward", + Handler: _Events_Forward_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Subscribe", + Handler: _Events_Subscribe_Handler, + ServerStreams: true, + }, + }, + Metadata: "github.com/containerd/containerd/api/services/events/v1/events.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go b/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go index de08cc08358a5..52aff3dd2f1c6 100644 --- a/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go @@ -1,40 +1,49 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/services/images/v1/images.proto package images import ( - context "context" - fmt "fmt" types "github.com/containerd/containerd/api/types" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types1 "github.com/gogo/protobuf/types" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - strings "strings" - time "time" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type Image struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Name provides a unique name for the image. // // Containerd treats this as the primary identifier. @@ -46,2693 +55,892 @@ type Image struct { // The combined size of a key/value pair cannot exceed 4096 bytes. Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Target describes the content entry point of the image. - Target types.Descriptor `protobuf:"bytes,3,opt,name=target,proto3" json:"target"` + Target *types.Descriptor `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` // CreatedAt is the time the image was first created. - CreatedAt time.Time `protobuf:"bytes,7,opt,name=created_at,json=createdAt,proto3,stdtime" json:"created_at"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` // UpdatedAt is the last time the image was mutated. - UpdatedAt time.Time `protobuf:"bytes,8,opt,name=updated_at,json=updatedAt,proto3,stdtime" json:"updated_at"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,8,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` } -func (m *Image) Reset() { *m = Image{} } -func (*Image) ProtoMessage() {} -func (*Image) Descriptor() ([]byte, []int) { - return fileDescriptor_8666fa071128ae5f, []int{0} -} -func (m *Image) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Image) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Image.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *Image) Reset() { + *x = Image{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *Image) XXX_Merge(src proto.Message) { - xxx_messageInfo_Image.Merge(m, src) -} -func (m *Image) XXX_Size() int { - return m.Size() -} -func (m *Image) XXX_DiscardUnknown() { - xxx_messageInfo_Image.DiscardUnknown(m) -} - -var xxx_messageInfo_Image proto.InternalMessageInfo -type GetImageRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *Image) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetImageRequest) Reset() { *m = GetImageRequest{} } -func (*GetImageRequest) ProtoMessage() {} -func (*GetImageRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_8666fa071128ae5f, []int{1} -} -func (m *GetImageRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetImageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetImageRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - 
if err != nil { - return nil, err +func (*Image) ProtoMessage() {} + +func (x *Image) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *GetImageRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetImageRequest.Merge(m, src) -} -func (m *GetImageRequest) XXX_Size() int { - return m.Size() -} -func (m *GetImageRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetImageRequest.DiscardUnknown(m) + return mi.MessageOf(x) } -var xxx_messageInfo_GetImageRequest proto.InternalMessageInfo - -type GetImageResponse struct { - Image *Image `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +// Deprecated: Use Image.ProtoReflect.Descriptor instead. +func (*Image) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescGZIP(), []int{0} } -func (m *GetImageResponse) Reset() { *m = GetImageResponse{} } -func (*GetImageResponse) ProtoMessage() {} -func (*GetImageResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_8666fa071128ae5f, []int{2} -} -func (m *GetImageResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetImageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetImageResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *Image) GetName() string { + if x != nil { + return x.Name } + return "" } -func (m *GetImageResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetImageResponse.Merge(m, src) -} -func (m *GetImageResponse) XXX_Size() int { - return m.Size() -} -func (m *GetImageResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetImageResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetImageResponse proto.InternalMessageInfo -type CreateImageRequest struct { - Image Image `protobuf:"bytes,1,opt,name=image,proto3" json:"image"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *Image) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil } -func (m *CreateImageRequest) Reset() { *m = CreateImageRequest{} } -func (*CreateImageRequest) ProtoMessage() {} -func (*CreateImageRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_8666fa071128ae5f, []int{3} -} -func (m *CreateImageRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CreateImageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CreateImageRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *Image) GetTarget() *types.Descriptor { + if x != nil { + return x.Target } -} -func (m *CreateImageRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateImageRequest.Merge(m, src) -} -func (m *CreateImageRequest) XXX_Size() int { - return m.Size() -} -func (m *CreateImageRequest) XXX_DiscardUnknown() { - 
xxx_messageInfo_CreateImageRequest.DiscardUnknown(m) + return nil } -var xxx_messageInfo_CreateImageRequest proto.InternalMessageInfo - -type CreateImageResponse struct { - Image Image `protobuf:"bytes,1,opt,name=image,proto3" json:"image"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *Image) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil } -func (m *CreateImageResponse) Reset() { *m = CreateImageResponse{} } -func (*CreateImageResponse) ProtoMessage() {} -func (*CreateImageResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_8666fa071128ae5f, []int{4} -} -func (m *CreateImageResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CreateImageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CreateImageResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *Image) GetUpdatedAt() *timestamppb.Timestamp { + if x != nil { + return x.UpdatedAt } -} -func (m *CreateImageResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateImageResponse.Merge(m, src) -} -func (m *CreateImageResponse) XXX_Size() int { - return m.Size() -} -func (m *CreateImageResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CreateImageResponse.DiscardUnknown(m) + return nil } -var xxx_messageInfo_CreateImageResponse proto.InternalMessageInfo +type GetImageRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -type UpdateImageRequest struct { - // Image provides a full or partial image for update. - // - // The name field must be set or an error will be returned. - Image Image `protobuf:"bytes,1,opt,name=image,proto3" json:"image"` - // UpdateMask specifies which fields to perform the update on. If empty, - // the operation applies to all fields. 
- UpdateMask *types1.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } -func (m *UpdateImageRequest) Reset() { *m = UpdateImageRequest{} } -func (*UpdateImageRequest) ProtoMessage() {} -func (*UpdateImageRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_8666fa071128ae5f, []int{5} -} -func (m *UpdateImageRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UpdateImageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UpdateImageRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *GetImageRequest) Reset() { + *x = GetImageRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *UpdateImageRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateImageRequest.Merge(m, src) -} -func (m *UpdateImageRequest) XXX_Size() int { - return m.Size() -} -func (m *UpdateImageRequest) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateImageRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_UpdateImageRequest proto.InternalMessageInfo -type UpdateImageResponse struct { - Image Image `protobuf:"bytes,1,opt,name=image,proto3" json:"image"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *GetImageRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *UpdateImageResponse) Reset() { *m = UpdateImageResponse{} } -func (*UpdateImageResponse) ProtoMessage() {} -func (*UpdateImageResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_8666fa071128ae5f, []int{6} -} -func (m *UpdateImageResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UpdateImageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UpdateImageResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*GetImageRequest) ProtoMessage() {} + +func (x *GetImageRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *UpdateImageResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateImageResponse.Merge(m, src) -} -func (m *UpdateImageResponse) XXX_Size() int { - return m.Size() -} -func (m *UpdateImageResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateImageResponse.DiscardUnknown(m) + return mi.MessageOf(x) } -var xxx_messageInfo_UpdateImageResponse proto.InternalMessageInfo - -type ListImagesRequest struct { - // Filters contains one or more filters using the syntax defined in the - // containerd filter package. - // - // The returned result will be those that match any of the provided - // filters. 
Expanded, images that match the following will be - // returned: - // - // filters[0] or filters[1] or ... or filters[n-1] or filters[n] - // - // If filters is zero-length or nil, all items will be returned. - Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +// Deprecated: Use GetImageRequest.ProtoReflect.Descriptor instead. +func (*GetImageRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescGZIP(), []int{1} } -func (m *ListImagesRequest) Reset() { *m = ListImagesRequest{} } -func (*ListImagesRequest) ProtoMessage() {} -func (*ListImagesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_8666fa071128ae5f, []int{7} -} -func (m *ListImagesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListImagesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListImagesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *GetImageRequest) GetName() string { + if x != nil { + return x.Name } -} -func (m *ListImagesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListImagesRequest.Merge(m, src) -} -func (m *ListImagesRequest) XXX_Size() int { - return m.Size() -} -func (m *ListImagesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListImagesRequest.DiscardUnknown(m) + return "" } -var xxx_messageInfo_ListImagesRequest proto.InternalMessageInfo +type GetImageResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -type ListImagesResponse struct { - Images []Image `protobuf:"bytes,1,rep,name=images,proto3" json:"images"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Image *Image `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"` } -func (m *ListImagesResponse) Reset() { *m = ListImagesResponse{} } -func (*ListImagesResponse) ProtoMessage() {} -func (*ListImagesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_8666fa071128ae5f, []int{8} -} -func (m *ListImagesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListImagesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListImagesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *GetImageResponse) Reset() { + *x = GetImageResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ListImagesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListImagesResponse.Merge(m, src) -} -func (m *ListImagesResponse) XXX_Size() int { - return m.Size() -} -func (m *ListImagesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListImagesResponse.DiscardUnknown(m) -} -var xxx_messageInfo_ListImagesResponse proto.InternalMessageInfo - -type DeleteImageRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Sync indicates that 
the delete and cleanup should be done - // synchronously before returning to the caller - // - // Default is false - Sync bool `protobuf:"varint,2,opt,name=sync,proto3" json:"sync,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *GetImageResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DeleteImageRequest) Reset() { *m = DeleteImageRequest{} } -func (*DeleteImageRequest) ProtoMessage() {} -func (*DeleteImageRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_8666fa071128ae5f, []int{9} -} -func (m *DeleteImageRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteImageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteImageRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*GetImageResponse) ProtoMessage() {} + +func (x *GetImageResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *DeleteImageRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteImageRequest.Merge(m, src) -} -func (m *DeleteImageRequest) XXX_Size() int { - return m.Size() -} -func (m *DeleteImageRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteImageRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteImageRequest proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Image)(nil), "containerd.services.images.v1.Image") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.images.v1.Image.LabelsEntry") - proto.RegisterType((*GetImageRequest)(nil), "containerd.services.images.v1.GetImageRequest") - proto.RegisterType((*GetImageResponse)(nil), "containerd.services.images.v1.GetImageResponse") - proto.RegisterType((*CreateImageRequest)(nil), "containerd.services.images.v1.CreateImageRequest") - proto.RegisterType((*CreateImageResponse)(nil), "containerd.services.images.v1.CreateImageResponse") - proto.RegisterType((*UpdateImageRequest)(nil), "containerd.services.images.v1.UpdateImageRequest") - proto.RegisterType((*UpdateImageResponse)(nil), "containerd.services.images.v1.UpdateImageResponse") - proto.RegisterType((*ListImagesRequest)(nil), "containerd.services.images.v1.ListImagesRequest") - proto.RegisterType((*ListImagesResponse)(nil), "containerd.services.images.v1.ListImagesResponse") - proto.RegisterType((*DeleteImageRequest)(nil), "containerd.services.images.v1.DeleteImageRequest") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/images/v1/images.proto", fileDescriptor_8666fa071128ae5f) -} - -var fileDescriptor_8666fa071128ae5f = []byte{ - // 659 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0x8e, 0x93, 0xd4, 0x6d, 0x27, 0x07, 0xca, 0x52, 0x21, 0xcb, 0x40, 0x1a, 0x45, 0x20, 0xe5, - 0xc2, 0x9a, 0x86, 0x0b, 0xb4, 0x08, 0xd1, 0xb4, 0xa5, 0x20, 0x15, 0x0e, 0xe6, 0xaf, 0xe2, 0x52, - 0x6d, 0x92, 0x89, 0xb1, 0x62, 0xc7, 0xc6, 0xbb, 0x89, 0x94, 0x1b, 0x8f, 0x80, 0x04, 0x0f, 0xd5, - 0x23, 0x47, 0x4e, 0x40, 0x73, 0xe0, 0x39, 0x90, 0x77, 
0x37, 0x34, 0x4d, 0x22, 0x92, 0x94, 0xde, - 0x66, 0xed, 0xef, 0x9b, 0x9f, 0x6f, 0x66, 0x76, 0x61, 0xcf, 0xf3, 0xc5, 0x87, 0x6e, 0x9d, 0x36, - 0xa2, 0xd0, 0x69, 0x44, 0x1d, 0xc1, 0xfc, 0x0e, 0x26, 0xcd, 0x51, 0x93, 0xc5, 0xbe, 0xc3, 0x31, - 0xe9, 0xf9, 0x0d, 0xe4, 0x8e, 0x1f, 0x32, 0x0f, 0xb9, 0xd3, 0xdb, 0xd4, 0x16, 0x8d, 0x93, 0x48, - 0x44, 0xe4, 0xd6, 0x19, 0x9e, 0x0e, 0xb1, 0x54, 0x23, 0x7a, 0x9b, 0xf6, 0xba, 0x17, 0x79, 0x91, - 0x44, 0x3a, 0xa9, 0xa5, 0x48, 0xf6, 0x0d, 0x2f, 0x8a, 0xbc, 0x00, 0x1d, 0x79, 0xaa, 0x77, 0x5b, - 0x0e, 0x86, 0xb1, 0xe8, 0xeb, 0x9f, 0xa5, 0xf1, 0x9f, 0x2d, 0x1f, 0x83, 0xe6, 0x71, 0xc8, 0x78, - 0x5b, 0x23, 0x36, 0xc6, 0x11, 0xc2, 0x0f, 0x91, 0x0b, 0x16, 0xc6, 0x1a, 0xb0, 0x3d, 0x57, 0x69, - 0xa2, 0x1f, 0x23, 0x77, 0x9a, 0xc8, 0x1b, 0x89, 0x1f, 0x8b, 0x28, 0x51, 0xe4, 0xf2, 0xef, 0x2c, - 0x2c, 0x3d, 0x4f, 0x0b, 0x20, 0x04, 0xf2, 0x1d, 0x16, 0xa2, 0x65, 0x94, 0x8c, 0xca, 0xaa, 0x2b, - 0x6d, 0xf2, 0x0c, 0xcc, 0x80, 0xd5, 0x31, 0xe0, 0x56, 0xb6, 0x94, 0xab, 0x14, 0xaa, 0xf7, 0xe8, - 0x3f, 0x05, 0xa0, 0xd2, 0x13, 0x3d, 0x94, 0x94, 0xfd, 0x8e, 0x48, 0xfa, 0xae, 0xe6, 0x93, 0x2d, - 0x30, 0x05, 0x4b, 0x3c, 0x14, 0x56, 0xae, 0x64, 0x54, 0x0a, 0xd5, 0x9b, 0xa3, 0x9e, 0x64, 0x6e, - 0x74, 0xef, 0x6f, 0x6e, 0xb5, 0xfc, 0xc9, 0x8f, 0x8d, 0x8c, 0xab, 0x19, 0x64, 0x17, 0xa0, 0x91, - 0x20, 0x13, 0xd8, 0x3c, 0x66, 0xc2, 0x5a, 0x96, 0x7c, 0x9b, 0x2a, 0x59, 0xe8, 0x50, 0x16, 0xfa, - 0x7a, 0x28, 0x4b, 0x6d, 0x25, 0x65, 0x7f, 0xfe, 0xb9, 0x61, 0xb8, 0xab, 0x9a, 0xb7, 0x23, 0x9d, - 0x74, 0xe3, 0xe6, 0xd0, 0xc9, 0xca, 0x22, 0x4e, 0x34, 0x6f, 0x47, 0xd8, 0x0f, 0xa1, 0x30, 0x52, - 0x1c, 0x59, 0x83, 0x5c, 0x1b, 0xfb, 0x5a, 0xb1, 0xd4, 0x24, 0xeb, 0xb0, 0xd4, 0x63, 0x41, 0x17, - 0xad, 0xac, 0xfc, 0xa6, 0x0e, 0x5b, 0xd9, 0x07, 0x46, 0xf9, 0x0e, 0x5c, 0x39, 0x40, 0x21, 0x05, - 0x72, 0xf1, 0x63, 0x17, 0xb9, 0x98, 0xa6, 0x78, 0xf9, 0x25, 0xac, 0x9d, 0xc1, 0x78, 0x1c, 0x75, - 0x38, 0x92, 0x2d, 0x58, 0x92, 0x12, 0x4b, 0x60, 0xa1, 0x7a, 0x7b, 0x9e, 0x26, 0xb8, 0x8a, 0x52, - 0x7e, 0x0b, 0x64, 0x57, 0x6a, 0x70, 0x2e, 0xf2, 0x93, 0x0b, 0x78, 0xd4, 0x4d, 0xd1, 0x7e, 0xdf, - 0xc1, 0xb5, 0x73, 0x7e, 0x75, 0xaa, 0xff, 0xef, 0xf8, 0x8b, 0x01, 0xe4, 0x8d, 0x14, 0xfc, 0x72, - 0x33, 0x26, 0xdb, 0x50, 0x50, 0x8d, 0x94, 0xcb, 0x25, 0x1b, 0x34, 0x6d, 0x02, 0x9e, 0xa6, 0xfb, - 0xf7, 0x82, 0xf1, 0xb6, 0xab, 0xe7, 0x25, 0xb5, 0xd3, 0x72, 0xcf, 0x25, 0x75, 0x69, 0xe5, 0xde, - 0x85, 0xab, 0x87, 0x3e, 0x57, 0x0d, 0xe7, 0xc3, 0x62, 0x2d, 0x58, 0x6e, 0xf9, 0x81, 0xc0, 0x84, - 0x5b, 0x46, 0x29, 0x57, 0x59, 0x75, 0x87, 0xc7, 0xf2, 0x11, 0x90, 0x51, 0xb8, 0x4e, 0xa3, 0x06, - 0xa6, 0x0a, 0x22, 0xe1, 0x8b, 0xe5, 0xa1, 0x99, 0xe5, 0x47, 0x40, 0xf6, 0x30, 0xc0, 0x31, 0xd9, - 0xa7, 0x5d, 0x0a, 0x04, 0xf2, 0xbc, 0xdf, 0x69, 0x48, 0x05, 0x57, 0x5c, 0x69, 0x57, 0xbf, 0xe6, - 0xc1, 0x54, 0x49, 0x91, 0x16, 0xe4, 0x0e, 0x50, 0x10, 0x3a, 0x23, 0x87, 0xb1, 0x65, 0xb0, 0x9d, - 0xb9, 0xf1, 0xba, 0xe8, 0x36, 0xe4, 0x53, 0x29, 0xc8, 0xac, 0x3b, 0x69, 0x42, 0x5e, 0x7b, 0x73, - 0x01, 0x86, 0x0e, 0x16, 0x81, 0xa9, 0xc6, 0x9d, 0xcc, 0x22, 0x4f, 0x6e, 0x9b, 0x5d, 0x5d, 0x84, - 0x72, 0x16, 0x50, 0x0d, 0xdc, 0xcc, 0x80, 0x93, 0xcb, 0x32, 0x33, 0xe0, 0xb4, 0x51, 0x7e, 0x05, - 0xa6, 0xea, 0xff, 0xcc, 0x80, 0x93, 0x63, 0x62, 0x5f, 0x9f, 0x58, 0xa3, 0xfd, 0xf4, 0x8d, 0xab, - 0x1d, 0x9d, 0x9c, 0x16, 0x33, 0xdf, 0x4f, 0x8b, 0x99, 0x4f, 0x83, 0xa2, 0x71, 0x32, 0x28, 0x1a, - 0xdf, 0x06, 0x45, 0xe3, 0xd7, 0xa0, 0x68, 0xbc, 0x7f, 0x7c, 0xc1, 0xf7, 0x78, 0x5b, 0x59, 0x47, - 0x99, 0xba, 0x29, 0x63, 0xdd, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0x24, 0x4e, 
0xca, 0x64, 0xda, - 0x07, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// ImagesClient is the client API for Images service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ImagesClient interface { - // Get returns an image by name. - Get(ctx context.Context, in *GetImageRequest, opts ...grpc.CallOption) (*GetImageResponse, error) - // List returns a list of all images known to containerd. - List(ctx context.Context, in *ListImagesRequest, opts ...grpc.CallOption) (*ListImagesResponse, error) - // Create an image record in the metadata store. - // - // The name of the image must be unique. - Create(ctx context.Context, in *CreateImageRequest, opts ...grpc.CallOption) (*CreateImageResponse, error) - // Update assigns the name to a given target image based on the provided - // image. - Update(ctx context.Context, in *UpdateImageRequest, opts ...grpc.CallOption) (*UpdateImageResponse, error) - // Delete deletes the image by name. - Delete(ctx context.Context, in *DeleteImageRequest, opts ...grpc.CallOption) (*types1.Empty, error) + return mi.MessageOf(x) } -type imagesClient struct { - cc *grpc.ClientConn -} - -func NewImagesClient(cc *grpc.ClientConn) ImagesClient { - return &imagesClient{cc} +// Deprecated: Use GetImageResponse.ProtoReflect.Descriptor instead. +func (*GetImageResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescGZIP(), []int{2} } -func (c *imagesClient) Get(ctx context.Context, in *GetImageRequest, opts ...grpc.CallOption) (*GetImageResponse, error) { - out := new(GetImageResponse) - err := c.cc.Invoke(ctx, "/containerd.services.images.v1.Images/Get", in, out, opts...) - if err != nil { - return nil, err +func (x *GetImageResponse) GetImage() *Image { + if x != nil { + return x.Image } - return out, nil + return nil } -func (c *imagesClient) List(ctx context.Context, in *ListImagesRequest, opts ...grpc.CallOption) (*ListImagesResponse, error) { - out := new(ListImagesResponse) - err := c.cc.Invoke(ctx, "/containerd.services.images.v1.Images/List", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +type CreateImageRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Image *Image `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"` + SourceDateEpoch *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=source_date_epoch,json=sourceDateEpoch,proto3" json:"source_date_epoch,omitempty"` } -func (c *imagesClient) Create(ctx context.Context, in *CreateImageRequest, opts ...grpc.CallOption) (*CreateImageResponse, error) { - out := new(CreateImageResponse) - err := c.cc.Invoke(ctx, "/containerd.services.images.v1.Images/Create", in, out, opts...) 
- if err != nil { - return nil, err +func (x *CreateImageRequest) Reset() { + *x = CreateImageRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return out, nil } -func (c *imagesClient) Update(ctx context.Context, in *UpdateImageRequest, opts ...grpc.CallOption) (*UpdateImageResponse, error) { - out := new(UpdateImageResponse) - err := c.cc.Invoke(ctx, "/containerd.services.images.v1.Images/Update", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +func (x *CreateImageRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (c *imagesClient) Delete(ctx context.Context, in *DeleteImageRequest, opts ...grpc.CallOption) (*types1.Empty, error) { - out := new(types1.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.images.v1.Images/Delete", in, out, opts...) - if err != nil { - return nil, err +func (*CreateImageRequest) ProtoMessage() {} + +func (x *CreateImageRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return out, nil + return mi.MessageOf(x) } -// ImagesServer is the server API for Images service. -type ImagesServer interface { - // Get returns an image by name. - Get(context.Context, *GetImageRequest) (*GetImageResponse, error) - // List returns a list of all images known to containerd. - List(context.Context, *ListImagesRequest) (*ListImagesResponse, error) - // Create an image record in the metadata store. - // - // The name of the image must be unique. - Create(context.Context, *CreateImageRequest) (*CreateImageResponse, error) - // Update assigns the name to a given target image based on the provided - // image. - Update(context.Context, *UpdateImageRequest) (*UpdateImageResponse, error) - // Delete deletes the image by name. - Delete(context.Context, *DeleteImageRequest) (*types1.Empty, error) +// Deprecated: Use CreateImageRequest.ProtoReflect.Descriptor instead. +func (*CreateImageRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescGZIP(), []int{3} } -// UnimplementedImagesServer can be embedded to have forward compatible implementations. 
-type UnimplementedImagesServer struct { +func (x *CreateImageRequest) GetImage() *Image { + if x != nil { + return x.Image + } + return nil } -func (*UnimplementedImagesServer) Get(ctx context.Context, req *GetImageRequest) (*GetImageResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") -} -func (*UnimplementedImagesServer) List(ctx context.Context, req *ListImagesRequest) (*ListImagesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method List not implemented") -} -func (*UnimplementedImagesServer) Create(ctx context.Context, req *CreateImageRequest) (*CreateImageResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") -} -func (*UnimplementedImagesServer) Update(ctx context.Context, req *UpdateImageRequest) (*UpdateImageResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") -} -func (*UnimplementedImagesServer) Delete(ctx context.Context, req *DeleteImageRequest) (*types1.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +func (x *CreateImageRequest) GetSourceDateEpoch() *timestamppb.Timestamp { + if x != nil { + return x.SourceDateEpoch + } + return nil } -func RegisterImagesServer(s *grpc.Server, srv ImagesServer) { - s.RegisterService(&_Images_serviceDesc, srv) +type CreateImageResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Image *Image `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"` } -func _Images_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetImageRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ImagesServer).Get(ctx, in) +func (x *CreateImageResponse) Reset() { + *x = CreateImageResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.images.v1.Images/Get", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ImagesServer).Get(ctx, req.(*GetImageRequest)) - } - return interceptor(ctx, in, info, handler) } -func _Images_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListImagesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ImagesServer).List(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.images.v1.Images/List", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ImagesServer).List(ctx, req.(*ListImagesRequest)) - } - return interceptor(ctx, in, info, handler) +func (x *CreateImageResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func _Images_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateImageRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ImagesServer).Create(ctx, in) - } - info := 
&grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.images.v1.Images/Create", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ImagesServer).Create(ctx, req.(*CreateImageRequest)) +func (*CreateImageResponse) ProtoMessage() {} + +func (x *CreateImageResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return interceptor(ctx, in, info, handler) + return mi.MessageOf(x) } -func _Images_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateImageRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ImagesServer).Update(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.images.v1.Images/Update", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ImagesServer).Update(ctx, req.(*UpdateImageRequest)) - } - return interceptor(ctx, in, info, handler) +// Deprecated: Use CreateImageResponse.ProtoReflect.Descriptor instead. +func (*CreateImageResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescGZIP(), []int{4} } -func _Images_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteImageRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ImagesServer).Delete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.images.v1.Images/Delete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ImagesServer).Delete(ctx, req.(*DeleteImageRequest)) +func (x *CreateImageResponse) GetImage() *Image { + if x != nil { + return x.Image } - return interceptor(ctx, in, info, handler) + return nil } -var _Images_serviceDesc = grpc.ServiceDesc{ - ServiceName: "containerd.services.images.v1.Images", - HandlerType: (*ImagesServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Get", - Handler: _Images_Get_Handler, - }, - { - MethodName: "List", - Handler: _Images_List_Handler, - }, - { - MethodName: "Create", - Handler: _Images_Create_Handler, - }, - { - MethodName: "Update", - Handler: _Images_Update_Handler, - }, - { - MethodName: "Delete", - Handler: _Images_Delete_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "github.com/containerd/containerd/api/services/images/v1/images.proto", +type UpdateImageRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Image provides a full or partial image for update. + // + // The name field must be set or an error will be returned. + Image *Image `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"` + // UpdateMask specifies which fields to perform the update on. If empty, + // the operation applies to all fields. 
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + SourceDateEpoch *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=source_date_epoch,json=sourceDateEpoch,proto3" json:"source_date_epoch,omitempty"` } -func (m *Image) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *UpdateImageRequest) Reset() { + *x = UpdateImageRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *Image) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *UpdateImageRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Image) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt):]) - if err1 != nil { - return 0, err1 - } - i -= n1 - i = encodeVarintImages(dAtA, i, uint64(n1)) - i-- - dAtA[i] = 0x42 - n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt):]) - if err2 != nil { - return 0, err2 - } - i -= n2 - i = encodeVarintImages(dAtA, i, uint64(n2)) - i-- - dAtA[i] = 0x3a - { - size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintImages(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintImages(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintImages(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintImages(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 +func (*UpdateImageRequest) ProtoMessage() {} + +func (x *UpdateImageRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintImages(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return mi.MessageOf(x) } -func (m *GetImageRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +// Deprecated: Use UpdateImageRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateImageRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescGZIP(), []int{5} } -func (m *GetImageRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *UpdateImageRequest) GetImage() *Image { + if x != nil { + return x.Image + } + return nil } -func (m *GetImageRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintImages(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa +func (x *UpdateImageRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask } - return len(dAtA) - i, nil + return nil } -func (m *GetImageResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *UpdateImageRequest) GetSourceDateEpoch() *timestamppb.Timestamp { + if x != nil { + return x.SourceDateEpoch } - return dAtA[:n], nil + return nil } -func (m *GetImageResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} +type UpdateImageResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *GetImageResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Image != nil { - { - size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintImages(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + Image *Image `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"` } -func (m *CreateImageRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *UpdateImageResponse) Reset() { + *x = UpdateImageResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *CreateImageRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *UpdateImageResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *CreateImageRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err +func (*UpdateImageResponse) ProtoMessage() {} + +func (x *UpdateImageResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - i -= size - i = 
encodeVarintImages(dAtA, i, uint64(size)) + return ms } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return mi.MessageOf(x) } -func (m *CreateImageResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +// Deprecated: Use UpdateImageResponse.ProtoReflect.Descriptor instead. +func (*UpdateImageResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescGZIP(), []int{6} } -func (m *CreateImageResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *UpdateImageResponse) GetImage() *Image { + if x != nil { + return x.Image + } + return nil } -func (m *CreateImageResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintImages(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil +type ListImagesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Filters contains one or more filters using the syntax defined in the + // containerd filter package. + // + // The returned result will be those that match any of the provided + // filters. Expanded, images that match the following will be + // returned: + // + // filters[0] or filters[1] or ... or filters[n-1] or filters[n] + // + // If filters is zero-length or nil, all items will be returned. 
+ Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` } -func (m *UpdateImageRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *ListImagesRequest) Reset() { + *x = ListImagesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *UpdateImageRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *ListImagesRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *UpdateImageRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.UpdateMask != nil { - { - size, err := m.UpdateMask.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintImages(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err +func (*ListImagesRequest) ProtoMessage() {} + +func (x *ListImagesRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - i -= size - i = encodeVarintImages(dAtA, i, uint64(size)) + return ms } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return mi.MessageOf(x) } -func (m *UpdateImageResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +// Deprecated: Use ListImagesRequest.ProtoReflect.Descriptor instead. 
+func (*ListImagesRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescGZIP(), []int{7} } -func (m *UpdateImageResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *ListImagesRequest) GetFilters() []string { + if x != nil { + return x.Filters + } + return nil } -func (m *UpdateImageResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintImages(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil +type ListImagesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Images []*Image `protobuf:"bytes,1,rep,name=images,proto3" json:"images,omitempty"` } -func (m *ListImagesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *ListImagesResponse) Reset() { + *x = ListImagesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *ListImagesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *ListImagesResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ListImagesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Filters) > 0 { - for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Filters[iNdEx]) - copy(dAtA[i:], m.Filters[iNdEx]) - i = encodeVarintImages(dAtA, i, uint64(len(m.Filters[iNdEx]))) - i-- - dAtA[i] = 0xa +func (*ListImagesResponse) ProtoMessage() {} + +func (x *ListImagesResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } - return len(dAtA) - i, nil + return mi.MessageOf(x) } -func (m *ListImagesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +// Deprecated: Use ListImagesResponse.ProtoReflect.Descriptor instead. 
+func (*ListImagesResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescGZIP(), []int{8} } -func (m *ListImagesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *ListImagesResponse) GetImages() []*Image { + if x != nil { + return x.Images + } + return nil } -func (m *ListImagesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Images) > 0 { - for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintImages(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil +type DeleteImageRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Sync indicates that the delete and cleanup should be done + // synchronously before returning to the caller + // + // Default is false + Sync bool `protobuf:"varint,2,opt,name=sync,proto3" json:"sync,omitempty"` } -func (m *DeleteImageRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *DeleteImageRequest) Reset() { + *x = DeleteImageRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *DeleteImageRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *DeleteImageRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DeleteImageRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Sync { - i-- - if m.Sync { - dAtA[i] = 1 - } else { - dAtA[i] = 0 +func (*DeleteImageRequest) ProtoMessage() {} + +func (x *DeleteImageRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - i-- - dAtA[i] = 0x10 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintImages(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa + return ms } - return len(dAtA) - i, nil + return mi.MessageOf(x) } -func encodeVarintImages(dAtA []byte, offset int, v uint64) int { - offset -= sovImages(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Image) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovImages(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovImages(uint64(len(k))) + 1 + len(v) + 
sovImages(uint64(len(v))) - n += mapEntrySize + 1 + sovImages(uint64(mapEntrySize)) - } - } - l = m.Target.Size() - n += 1 + l + sovImages(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt) - n += 1 + l + sovImages(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt) - n += 1 + l + sovImages(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *GetImageRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovImages(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *GetImageResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Image != nil { - l = m.Image.Size() - n += 1 + l + sovImages(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CreateImageRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Image.Size() - n += 1 + l + sovImages(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CreateImageResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Image.Size() - n += 1 + l + sovImages(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UpdateImageRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Image.Size() - n += 1 + l + sovImages(uint64(l)) - if m.UpdateMask != nil { - l = m.UpdateMask.Size() - n += 1 + l + sovImages(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UpdateImageResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Image.Size() - n += 1 + l + sovImages(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListImagesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Filters) > 0 { - for _, s := range m.Filters { - l = len(s) - n += 1 + l + sovImages(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListImagesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Images) > 0 { - for _, e := range m.Images { - l = e.Size() - n += 1 + l + sovImages(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DeleteImageRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovImages(uint64(l)) - } - if m.Sync { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovImages(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozImages(x uint64) (n int) { - return sovImages(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Image) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&Image{`, - `Name:` + 
fmt.Sprintf("%v", this.Name) + `,`, - `Labels:` + mapStringForLabels + `,`, - `Target:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Target), "Descriptor", "types.Descriptor", 1), `&`, ``, 1) + `,`, - `CreatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CreatedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`, - `UpdatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UpdatedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *GetImageRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GetImageRequest{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *GetImageResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GetImageResponse{`, - `Image:` + strings.Replace(this.Image.String(), "Image", "Image", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CreateImageRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CreateImageRequest{`, - `Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CreateImageResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CreateImageResponse{`, - `Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UpdateImageRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UpdateImageRequest{`, - `Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`, - `UpdateMask:` + strings.Replace(fmt.Sprintf("%v", this.UpdateMask), "FieldMask", "types1.FieldMask", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UpdateImageResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UpdateImageResponse{`, - `Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListImagesRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ListImagesRequest{`, - `Filters:` + fmt.Sprintf("%v", this.Filters) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListImagesResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForImages := "[]Image{" - for _, f := range this.Images { - repeatedStringForImages += strings.Replace(strings.Replace(f.String(), "Image", "Image", 1), `&`, ``, 1) + "," - } - repeatedStringForImages += "}" - s := strings.Join([]string{`&ListImagesResponse{`, - `Images:` + repeatedStringForImages + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this 
*DeleteImageRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeleteImageRequest{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Sync:` + fmt.Sprintf("%v", this.Sync) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringImages(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Image) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Image: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Image: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthImages - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthImages - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if 
fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthImages - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthImages - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipImages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImages - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipImages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetImageRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetImageRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetImageRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipImages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetImageResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetImageResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetImageResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Image == nil { - m.Image = &Image{} - } - if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipImages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CreateImageRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CreateImageRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CreateImageRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipImages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CreateImageResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CreateImageResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CreateImageResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipImages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, 
dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UpdateImageRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UpdateImageRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateImageRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdateMask", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.UpdateMask == nil { - m.UpdateMask = &types1.FieldMask{} - } - if err := m.UpdateMask.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipImages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UpdateImageResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UpdateImageResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateImageResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipImages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListImagesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListImagesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListImagesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipImages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListImagesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListImagesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListImagesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Images = append(m.Images, Image{}) - if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipImages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteImageRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteImageRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteImageRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthImages - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthImages - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Sync", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowImages - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Sync = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipImages(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthImages - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipImages(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowImages - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowImages - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowImages - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthImages - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupImages - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthImages - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF +// Deprecated: Use DeleteImageRequest.ProtoReflect.Descriptor instead. +func (*DeleteImageRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescGZIP(), []int{9} +} + +func (x *DeleteImageRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *DeleteImageRequest) GetSync() bool { + if x != nil { + return x.Sync + } + return false +} + +var File_github_com_containerd_containerd_api_services_images_v1_images_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDesc = []byte{ + 0x0a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, + 0x65, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 
0x79, 0x70, 0x65, + 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0xcc, 0x02, 0x0a, 0x05, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x48, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x30, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x34, 0x0a, 0x06, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x44, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x25, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x4e, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x49, + 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x05, + 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x61, 0x67, + 0x65, 0x52, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x22, 0x98, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x3a, 0x0a, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, + 0x6d, 0x61, 0x67, 0x65, 0x52, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x46, 0x0a, 0x11, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, + 0x18, 
0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x52, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x61, 0x74, 0x65, 0x45, 0x70, + 0x6f, 0x63, 0x68, 0x22, 0x51, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, + 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x05, 0x69, 0x6d, + 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, + 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, + 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x22, 0xd5, 0x01, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, + 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x61, + 0x67, 0x65, 0x52, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x46, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x5f, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0f, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x61, 0x74, 0x65, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x22, 0x51, + 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x05, 0x69, 0x6d, 0x61, 0x67, + 0x65, 0x22, 0x2d, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x22, 0x52, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x06, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, + 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x06, 0x69, 0x6d, + 0x61, 0x67, 0x65, 0x73, 0x22, 0x3c, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x6d, + 0x61, 0x67, 0x65, 0x52, 0x65, 
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x73, 0x79, 0x6e, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x73, 0x79, + 0x6e, 0x63, 0x32, 0x94, 0x04, 0x0a, 0x06, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x12, 0x66, 0x0a, + 0x03, 0x47, 0x65, 0x74, 0x12, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6b, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x30, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x06, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x31, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x31, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x31, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 
0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x69, 0x6d, 0x61, 0x67, 0x65, + 0x73, 0x2f, 0x76, 0x31, 0x3b, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( - ErrInvalidLengthImages = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowImages = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupImages = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescData = file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes = make([]protoimpl.MessageInfo, 11) +var file_github_com_containerd_containerd_api_services_images_v1_images_proto_goTypes = []interface{}{ + (*Image)(nil), // 0: containerd.services.images.v1.Image + (*GetImageRequest)(nil), // 1: containerd.services.images.v1.GetImageRequest + (*GetImageResponse)(nil), // 2: containerd.services.images.v1.GetImageResponse + (*CreateImageRequest)(nil), // 3: containerd.services.images.v1.CreateImageRequest + (*CreateImageResponse)(nil), // 4: containerd.services.images.v1.CreateImageResponse + (*UpdateImageRequest)(nil), // 5: containerd.services.images.v1.UpdateImageRequest + (*UpdateImageResponse)(nil), // 6: containerd.services.images.v1.UpdateImageResponse + (*ListImagesRequest)(nil), // 7: containerd.services.images.v1.ListImagesRequest + (*ListImagesResponse)(nil), // 8: containerd.services.images.v1.ListImagesResponse + (*DeleteImageRequest)(nil), // 9: containerd.services.images.v1.DeleteImageRequest + nil, // 10: containerd.services.images.v1.Image.LabelsEntry + (*types.Descriptor)(nil), // 11: containerd.types.Descriptor + (*timestamppb.Timestamp)(nil), // 12: google.protobuf.Timestamp + (*fieldmaskpb.FieldMask)(nil), // 13: google.protobuf.FieldMask + (*emptypb.Empty)(nil), // 14: google.protobuf.Empty +} +var file_github_com_containerd_containerd_api_services_images_v1_images_proto_depIdxs = []int32{ + 10, // 0: containerd.services.images.v1.Image.labels:type_name -> containerd.services.images.v1.Image.LabelsEntry + 11, // 1: containerd.services.images.v1.Image.target:type_name -> containerd.types.Descriptor + 12, // 2: containerd.services.images.v1.Image.created_at:type_name -> google.protobuf.Timestamp + 12, // 3: containerd.services.images.v1.Image.updated_at:type_name -> google.protobuf.Timestamp + 0, // 4: containerd.services.images.v1.GetImageResponse.image:type_name -> containerd.services.images.v1.Image + 0, // 5: containerd.services.images.v1.CreateImageRequest.image:type_name -> 
containerd.services.images.v1.Image + 12, // 6: containerd.services.images.v1.CreateImageRequest.source_date_epoch:type_name -> google.protobuf.Timestamp + 0, // 7: containerd.services.images.v1.CreateImageResponse.image:type_name -> containerd.services.images.v1.Image + 0, // 8: containerd.services.images.v1.UpdateImageRequest.image:type_name -> containerd.services.images.v1.Image + 13, // 9: containerd.services.images.v1.UpdateImageRequest.update_mask:type_name -> google.protobuf.FieldMask + 12, // 10: containerd.services.images.v1.UpdateImageRequest.source_date_epoch:type_name -> google.protobuf.Timestamp + 0, // 11: containerd.services.images.v1.UpdateImageResponse.image:type_name -> containerd.services.images.v1.Image + 0, // 12: containerd.services.images.v1.ListImagesResponse.images:type_name -> containerd.services.images.v1.Image + 1, // 13: containerd.services.images.v1.Images.Get:input_type -> containerd.services.images.v1.GetImageRequest + 7, // 14: containerd.services.images.v1.Images.List:input_type -> containerd.services.images.v1.ListImagesRequest + 3, // 15: containerd.services.images.v1.Images.Create:input_type -> containerd.services.images.v1.CreateImageRequest + 5, // 16: containerd.services.images.v1.Images.Update:input_type -> containerd.services.images.v1.UpdateImageRequest + 9, // 17: containerd.services.images.v1.Images.Delete:input_type -> containerd.services.images.v1.DeleteImageRequest + 2, // 18: containerd.services.images.v1.Images.Get:output_type -> containerd.services.images.v1.GetImageResponse + 8, // 19: containerd.services.images.v1.Images.List:output_type -> containerd.services.images.v1.ListImagesResponse + 4, // 20: containerd.services.images.v1.Images.Create:output_type -> containerd.services.images.v1.CreateImageResponse + 6, // 21: containerd.services.images.v1.Images.Update:output_type -> containerd.services.images.v1.UpdateImageResponse + 14, // 22: containerd.services.images.v1.Images.Delete:output_type -> google.protobuf.Empty + 18, // [18:23] is the sub-list for method output_type + 13, // [13:18] is the sub-list for method input_type + 13, // [13:13] is the sub-list for extension type_name + 13, // [13:13] is the sub-list for extension extendee + 0, // [0:13] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_services_images_v1_images_proto_init() } +func file_github_com_containerd_containerd_api_services_images_v1_images_proto_init() { + if File_github_com_containerd_containerd_api_services_images_v1_images_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Image); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetImageRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetImageResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateImageRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateImageResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateImageRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateImageResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListImagesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListImagesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteImageRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDesc, + NumEnums: 0, + NumMessages: 11, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_github_com_containerd_containerd_api_services_images_v1_images_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_services_images_v1_images_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_services_images_v1_images_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_services_images_v1_images_proto = out.File + file_github_com_containerd_containerd_api_services_images_v1_images_proto_rawDesc = nil + file_github_com_containerd_containerd_api_services_images_v1_images_proto_goTypes = nil + file_github_com_containerd_containerd_api_services_images_v1_images_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/services/images/v1/images.proto b/vendor/github.com/containerd/containerd/api/services/images/v1/images.proto index 338f4fb08d78e..b32df4b051cf6 100644 --- a/vendor/github.com/containerd/containerd/api/services/images/v1/images.proto +++ b/vendor/github.com/containerd/containerd/api/services/images/v1/images.proto @@ -18,7 +18,6 @@ syntax 
= "proto3"; package containerd.services.images.v1; -import weak "gogoproto/gogo.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/field_mask.proto"; import "google/protobuf/timestamp.proto"; @@ -71,13 +70,13 @@ message Image { map labels = 2; // Target describes the content entry point of the image. - containerd.types.Descriptor target = 3 [(gogoproto.nullable) = false]; + containerd.types.Descriptor target = 3; // CreatedAt is the time the image was first created. - google.protobuf.Timestamp created_at = 7 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp created_at = 7; // UpdatedAt is the last time the image was mutated. - google.protobuf.Timestamp updated_at = 8 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp updated_at = 8; } message GetImageRequest { @@ -89,26 +88,30 @@ message GetImageResponse { } message CreateImageRequest { - Image image = 1 [(gogoproto.nullable) = false]; + Image image = 1; + + google.protobuf.Timestamp source_date_epoch = 2; } message CreateImageResponse { - Image image = 1 [(gogoproto.nullable) = false]; + Image image = 1; } message UpdateImageRequest { // Image provides a full or partial image for update. // // The name field must be set or an error will be returned. - Image image = 1 [(gogoproto.nullable) = false]; + Image image = 1; // UpdateMask specifies which fields to perform the update on. If empty, // the operation applies to all fields. google.protobuf.FieldMask update_mask = 2; + + google.protobuf.Timestamp source_date_epoch = 3; } message UpdateImageResponse { - Image image = 1 [(gogoproto.nullable) = false]; + Image image = 1; } message ListImagesRequest { @@ -119,14 +122,14 @@ message ListImagesRequest { // filters. Expanded, images that match the following will be // returned: // - // filters[0] or filters[1] or ... or filters[n-1] or filters[n] + // filters[0] or filters[1] or ... or filters[n-1] or filters[n] // // If filters is zero-length or nil, all items will be returned. repeated string filters = 1; } message ListImagesResponse { - repeated Image images = 1 [(gogoproto.nullable) = false]; + repeated Image images = 1; } message DeleteImageRequest { diff --git a/vendor/github.com/containerd/containerd/api/services/images/v1/images_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/images/v1/images_grpc.pb.go new file mode 100644 index 0000000000000..86a4602e036a8 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/images/v1/images_grpc.pb.go @@ -0,0 +1,266 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/images/v1/images.proto + +package images + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// ImagesClient is the client API for Images service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ImagesClient interface { + // Get returns an image by name. 
+ Get(ctx context.Context, in *GetImageRequest, opts ...grpc.CallOption) (*GetImageResponse, error) + // List returns a list of all images known to containerd. + List(ctx context.Context, in *ListImagesRequest, opts ...grpc.CallOption) (*ListImagesResponse, error) + // Create an image record in the metadata store. + // + // The name of the image must be unique. + Create(ctx context.Context, in *CreateImageRequest, opts ...grpc.CallOption) (*CreateImageResponse, error) + // Update assigns the name to a given target image based on the provided + // image. + Update(ctx context.Context, in *UpdateImageRequest, opts ...grpc.CallOption) (*UpdateImageResponse, error) + // Delete deletes the image by name. + Delete(ctx context.Context, in *DeleteImageRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) +} + +type imagesClient struct { + cc grpc.ClientConnInterface +} + +func NewImagesClient(cc grpc.ClientConnInterface) ImagesClient { + return &imagesClient{cc} +} + +func (c *imagesClient) Get(ctx context.Context, in *GetImageRequest, opts ...grpc.CallOption) (*GetImageResponse, error) { + out := new(GetImageResponse) + err := c.cc.Invoke(ctx, "/containerd.services.images.v1.Images/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *imagesClient) List(ctx context.Context, in *ListImagesRequest, opts ...grpc.CallOption) (*ListImagesResponse, error) { + out := new(ListImagesResponse) + err := c.cc.Invoke(ctx, "/containerd.services.images.v1.Images/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *imagesClient) Create(ctx context.Context, in *CreateImageRequest, opts ...grpc.CallOption) (*CreateImageResponse, error) { + out := new(CreateImageResponse) + err := c.cc.Invoke(ctx, "/containerd.services.images.v1.Images/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *imagesClient) Update(ctx context.Context, in *UpdateImageRequest, opts ...grpc.CallOption) (*UpdateImageResponse, error) { + out := new(UpdateImageResponse) + err := c.cc.Invoke(ctx, "/containerd.services.images.v1.Images/Update", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *imagesClient) Delete(ctx context.Context, in *DeleteImageRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.images.v1.Images/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ImagesServer is the server API for Images service. +// All implementations must embed UnimplementedImagesServer +// for forward compatibility +type ImagesServer interface { + // Get returns an image by name. + Get(context.Context, *GetImageRequest) (*GetImageResponse, error) + // List returns a list of all images known to containerd. + List(context.Context, *ListImagesRequest) (*ListImagesResponse, error) + // Create an image record in the metadata store. + // + // The name of the image must be unique. + Create(context.Context, *CreateImageRequest) (*CreateImageResponse, error) + // Update assigns the name to a given target image based on the provided + // image. + Update(context.Context, *UpdateImageRequest) (*UpdateImageResponse, error) + // Delete deletes the image by name. + Delete(context.Context, *DeleteImageRequest) (*emptypb.Empty, error) + mustEmbedUnimplementedImagesServer() +} + +// UnimplementedImagesServer must be embedded to have forward compatible implementations. 
+type UnimplementedImagesServer struct { +} + +func (UnimplementedImagesServer) Get(context.Context, *GetImageRequest) (*GetImageResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (UnimplementedImagesServer) List(context.Context, *ListImagesRequest) (*ListImagesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method List not implemented") +} +func (UnimplementedImagesServer) Create(context.Context, *CreateImageRequest) (*CreateImageResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") +} +func (UnimplementedImagesServer) Update(context.Context, *UpdateImageRequest) (*UpdateImageResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") +} +func (UnimplementedImagesServer) Delete(context.Context, *DeleteImageRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (UnimplementedImagesServer) mustEmbedUnimplementedImagesServer() {} + +// UnsafeImagesServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ImagesServer will +// result in compilation errors. +type UnsafeImagesServer interface { + mustEmbedUnimplementedImagesServer() +} + +func RegisterImagesServer(s grpc.ServiceRegistrar, srv ImagesServer) { + s.RegisterService(&Images_ServiceDesc, srv) +} + +func _Images_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetImageRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ImagesServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.images.v1.Images/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ImagesServer).Get(ctx, req.(*GetImageRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Images_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListImagesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ImagesServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.images.v1.Images/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ImagesServer).List(ctx, req.(*ListImagesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Images_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateImageRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ImagesServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.images.v1.Images/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ImagesServer).Create(ctx, req.(*CreateImageRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Images_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(UpdateImageRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ImagesServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.images.v1.Images/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ImagesServer).Update(ctx, req.(*UpdateImageRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Images_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteImageRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ImagesServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.images.v1.Images/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ImagesServer).Delete(ctx, req.(*DeleteImageRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Images_ServiceDesc is the grpc.ServiceDesc for Images service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Images_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.images.v1.Images", + HandlerType: (*ImagesServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _Images_Get_Handler, + }, + { + MethodName: "List", + Handler: _Images_List_Handler, + }, + { + MethodName: "Create", + Handler: _Images_Create_Handler, + }, + { + MethodName: "Update", + Handler: _Images_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _Images_Delete_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/containerd/containerd/api/services/images/v1/images.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go index d23c8b61a8e22..aeef7c3a0253c 100644 --- a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go @@ -1,38 +1,48 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/services/introspection/v1/introspection.proto package introspection import ( - context "context" - fmt "fmt" types "github.com/containerd/containerd/api/types" - rpc "github.com/gogo/googleapis/google/rpc" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - types1 "github.com/gogo/protobuf/types" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" + status "google.golang.org/genproto/googleapis/rpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" reflect "reflect" - strings "strings" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type Plugin struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Type defines the type of plugin. // // See package plugin for a list of possible values. Non core plugins may @@ -51,7 +61,7 @@ type Plugin struct { // // If the plugin prefers certain platforms over others, they should be // listed from most to least preferred. - Platforms []types.Platform `protobuf:"bytes,4,rep,name=platforms,proto3" json:"platforms"` + Platforms []*types.Platform `protobuf:"bytes,4,rep,name=platforms,proto3" json:"platforms,omitempty"` // Exports allows plugins to provide values about state or configuration to // interested parties. // @@ -69,1468 +79,445 @@ type Plugin struct { // was encountered during initialization. // // Plugins that have this value set cannot be used. 
- InitErr *rpc.Status `protobuf:"bytes,7,opt,name=init_err,json=initErr,proto3" json:"init_err,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + InitErr *status.Status `protobuf:"bytes,7,opt,name=init_err,json=initErr,proto3" json:"init_err,omitempty"` } -func (m *Plugin) Reset() { *m = Plugin{} } -func (*Plugin) ProtoMessage() {} -func (*Plugin) Descriptor() ([]byte, []int) { - return fileDescriptor_1a14fda866f10715, []int{0} -} -func (m *Plugin) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Plugin) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Plugin.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *Plugin) Reset() { + *x = Plugin{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *Plugin) XXX_Merge(src proto.Message) { - xxx_messageInfo_Plugin.Merge(m, src) -} -func (m *Plugin) XXX_Size() int { - return m.Size() -} -func (m *Plugin) XXX_DiscardUnknown() { - xxx_messageInfo_Plugin.DiscardUnknown(m) -} - -var xxx_messageInfo_Plugin proto.InternalMessageInfo - -type PluginsRequest struct { - // Filters contains one or more filters using the syntax defined in the - // containerd filter package. - // - // The returned result will be those that match any of the provided - // filters. Expanded, plugins that match the following will be - // returned: - // - // filters[0] or filters[1] or ... or filters[n-1] or filters[n] - // - // If filters is zero-length or nil, all items will be returned. 
- Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} -func (m *PluginsRequest) Reset() { *m = PluginsRequest{} } -func (*PluginsRequest) ProtoMessage() {} -func (*PluginsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_1a14fda866f10715, []int{1} +func (x *Plugin) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *PluginsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PluginsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PluginsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PluginsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PluginsRequest.Merge(m, src) -} -func (m *PluginsRequest) XXX_Size() int { - return m.Size() -} -func (m *PluginsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PluginsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_PluginsRequest proto.InternalMessageInfo -type PluginsResponse struct { - Plugins []Plugin `protobuf:"bytes,1,rep,name=plugins,proto3" json:"plugins"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} +func (*Plugin) ProtoMessage() {} -func (m *PluginsResponse) Reset() { *m = PluginsResponse{} } -func (*PluginsResponse) ProtoMessage() {} -func (*PluginsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_1a14fda866f10715, []int{2} -} -func (m *PluginsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PluginsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PluginsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (x *Plugin) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *PluginsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_PluginsResponse.Merge(m, src) -} -func (m *PluginsResponse) XXX_Size() int { - return m.Size() -} -func (m *PluginsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_PluginsResponse.DiscardUnknown(m) + return mi.MessageOf(x) } -var xxx_messageInfo_PluginsResponse proto.InternalMessageInfo - -type ServerResponse struct { - UUID string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +// Deprecated: Use Plugin.ProtoReflect.Descriptor instead. 
+func (*Plugin) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDescGZIP(), []int{0} } -func (m *ServerResponse) Reset() { *m = ServerResponse{} } -func (*ServerResponse) ProtoMessage() {} -func (*ServerResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_1a14fda866f10715, []int{3} -} -func (m *ServerResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ServerResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *Plugin) GetType() string { + if x != nil { + return x.Type } + return "" } -func (m *ServerResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServerResponse.Merge(m, src) -} -func (m *ServerResponse) XXX_Size() int { - return m.Size() -} -func (m *ServerResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ServerResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ServerResponse proto.InternalMessageInfo -func init() { - proto.RegisterType((*Plugin)(nil), "containerd.services.introspection.v1.Plugin") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.introspection.v1.Plugin.ExportsEntry") - proto.RegisterType((*PluginsRequest)(nil), "containerd.services.introspection.v1.PluginsRequest") - proto.RegisterType((*PluginsResponse)(nil), "containerd.services.introspection.v1.PluginsResponse") - proto.RegisterType((*ServerResponse)(nil), "containerd.services.introspection.v1.ServerResponse") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/introspection/v1/introspection.proto", fileDescriptor_1a14fda866f10715) -} - -var fileDescriptor_1a14fda866f10715 = []byte{ - // 549 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xc1, 0x6e, 0xd3, 0x40, - 0x10, 0xad, 0x9d, 0x34, 0x6e, 0x37, 0xa5, 0xa0, 0x55, 0x55, 0x2c, 0x83, 0x9c, 0x28, 0xe2, 0x10, - 0x21, 0x58, 0xab, 0x01, 0x24, 0x5a, 0x24, 0x0e, 0x51, 0x73, 0x88, 0xd4, 0x43, 0xe5, 0xa8, 0x08, - 0x71, 0xa9, 0x1c, 0x67, 0x63, 0x56, 0x38, 0xde, 0xed, 0xee, 0xda, 0x22, 0x37, 0x3e, 0x2f, 0x47, - 0x8e, 0x9c, 0x02, 0xf5, 0x37, 0xf0, 0x01, 0xc8, 0xbb, 0x76, 0x9a, 0xdc, 0x12, 0x71, 0x9b, 0x79, - 0x33, 0x6f, 0xe6, 0xcd, 0xf3, 0xca, 0xc0, 0x8f, 0x88, 0xfc, 0x9a, 0x8e, 0x51, 0x48, 0x67, 0x5e, - 0x48, 0x13, 0x19, 0x90, 0x04, 0xf3, 0xc9, 0x7a, 0x18, 0x30, 0xe2, 0x09, 0xcc, 0x33, 0x12, 0x62, - 0xe1, 0x91, 0x44, 0x72, 0x2a, 0x18, 0x0e, 0x25, 0xa1, 0x89, 0x97, 0x9d, 0x6d, 0x02, 0x88, 0x71, - 0x2a, 0x29, 0x7c, 0xf1, 0xc0, 0x46, 0x15, 0x13, 0x6d, 0x36, 0x66, 0x67, 0xce, 0xf9, 0x56, 0x9b, - 0xe5, 0x9c, 0x61, 0xe1, 0xb1, 0x38, 0x90, 0x53, 0xca, 0x67, 0x7a, 0x81, 0xf3, 0x34, 0xa2, 0x34, - 0x8a, 0xb1, 0xc7, 0x59, 0xe8, 0x09, 0x19, 0xc8, 0x54, 0x94, 0x85, 0x67, 0x65, 0x41, 0x65, 0xe3, - 0x74, 0xea, 0xe1, 0x19, 0x93, 0xf3, 0xb2, 0x78, 0x12, 0xd1, 0x88, 0xaa, 0xd0, 0x2b, 0x22, 0x8d, - 0x76, 0xfe, 0x9a, 0xa0, 0x71, 0x1d, 0xa7, 0x11, 0x49, 0x20, 0x04, 0xf5, 0x62, 0x9d, 0x6d, 0xb4, - 0x8d, 0xee, 0xa1, 0xaf, 0x62, 0x78, 0x0a, 0x4c, 0x32, 0xb1, 0xcd, 0x02, 0xe9, 0x37, 0xf2, 0x65, - 0xcb, 0x1c, 0x5e, 0xfa, 0x26, 0x99, 0x40, 0x07, 0x1c, 0x70, 0x7c, 0x97, 0x12, 0x8e, 0x85, 0x5d, - 0x6b, 0xd7, 0xba, 0x87, 0xfe, 0x2a, 0x87, 0x1f, 0xc1, 0x61, 0x25, 0x58, 0xd8, 0xf5, 0x76, 0xad, - 0xdb, 0xec, 0x39, 
0x68, 0xcd, 0x13, 0x75, 0x13, 0xba, 0x2e, 0x5b, 0xfa, 0xf5, 0xc5, 0xb2, 0xb5, - 0xe7, 0x3f, 0x50, 0xe0, 0x08, 0x58, 0xf8, 0x3b, 0xa3, 0x5c, 0x0a, 0x7b, 0x5f, 0xb1, 0xcf, 0xd1, - 0x36, 0x8e, 0x22, 0x7d, 0x06, 0x1a, 0x68, 0xee, 0x20, 0x91, 0x7c, 0xee, 0x57, 0x93, 0x60, 0x07, - 0x1c, 0x85, 0x01, 0x0b, 0xc6, 0x24, 0x26, 0x92, 0x60, 0x61, 0x37, 0x94, 0xe8, 0x0d, 0x0c, 0xbe, - 0x06, 0x07, 0x24, 0x21, 0xf2, 0x16, 0x73, 0x6e, 0x5b, 0x6d, 0xa3, 0xdb, 0xec, 0x41, 0xa4, 0x1d, - 0x45, 0x9c, 0x85, 0x68, 0xa4, 0xac, 0xf6, 0xad, 0xa2, 0x67, 0xc0, 0xb9, 0x73, 0x01, 0x8e, 0xd6, - 0x77, 0xc1, 0x27, 0xa0, 0xf6, 0x0d, 0xcf, 0x4b, 0xfb, 0x8a, 0x10, 0x9e, 0x80, 0xfd, 0x2c, 0x88, - 0x53, 0xac, 0x0d, 0xf4, 0x75, 0x72, 0x61, 0xbe, 0x37, 0x3a, 0x2f, 0xc1, 0xb1, 0x96, 0x2b, 0x7c, - 0x7c, 0x97, 0x62, 0x21, 0xa1, 0x0d, 0xac, 0x29, 0x89, 0x25, 0xe6, 0xc2, 0x36, 0x94, 0xb6, 0x2a, - 0xed, 0xdc, 0x82, 0xc7, 0xab, 0x5e, 0xc1, 0x68, 0x22, 0x30, 0xbc, 0x02, 0x16, 0xd3, 0x90, 0x6a, - 0x6e, 0xf6, 0x5e, 0xed, 0x62, 0x51, 0x69, 0x79, 0x35, 0xa2, 0x83, 0xc0, 0xf1, 0x08, 0xf3, 0x0c, - 0xf3, 0xd5, 0xfc, 0xe7, 0xa0, 0x9e, 0xa6, 0x64, 0xa2, 0x6f, 0xe9, 0x1f, 0xe4, 0xcb, 0x56, 0xfd, - 0xe6, 0x66, 0x78, 0xe9, 0x2b, 0xb4, 0xf7, 0xdb, 0x00, 0x8f, 0x86, 0xeb, 0xa3, 0x61, 0x06, 0xac, - 0x52, 0x22, 0x7c, 0xbb, 0x8b, 0x92, 0xea, 0x7a, 0xe7, 0xdd, 0x8e, 0xac, 0x52, 0xe7, 0x27, 0xd0, - 0xd0, 0xca, 0xe1, 0x69, 0xf5, 0xa5, 0xaa, 0xb7, 0x8f, 0x06, 0xc5, 0xdb, 0x77, 0xb6, 0x94, 0xb3, - 0x79, 0x7f, 0x7f, 0xba, 0xb8, 0x77, 0xf7, 0x7e, 0xdd, 0xbb, 0x7b, 0x3f, 0x72, 0xd7, 0x58, 0xe4, - 0xae, 0xf1, 0x33, 0x77, 0x8d, 0x3f, 0xb9, 0x6b, 0x7c, 0xb9, 0xfa, 0xbf, 0x1f, 0xc6, 0x87, 0x0d, - 0xe0, 0x73, 0x6d, 0xdc, 0x50, 0x7a, 0xdf, 0xfc, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x0c, 0xb3, 0x50, - 0xdc, 0x89, 0x04, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// IntrospectionClient is the client API for Introspection service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type IntrospectionClient interface { - // Plugins returns a list of plugins in containerd. - // - // Clients can use this to detect features and capabilities when using - // containerd. - Plugins(ctx context.Context, in *PluginsRequest, opts ...grpc.CallOption) (*PluginsResponse, error) - // Server returns information about the containerd server - Server(ctx context.Context, in *types1.Empty, opts ...grpc.CallOption) (*ServerResponse, error) -} - -type introspectionClient struct { - cc *grpc.ClientConn +func (x *Plugin) GetID() string { + if x != nil { + return x.ID + } + return "" } -func NewIntrospectionClient(cc *grpc.ClientConn) IntrospectionClient { - return &introspectionClient{cc} +func (x *Plugin) GetRequires() []string { + if x != nil { + return x.Requires + } + return nil } -func (c *introspectionClient) Plugins(ctx context.Context, in *PluginsRequest, opts ...grpc.CallOption) (*PluginsResponse, error) { - out := new(PluginsResponse) - err := c.cc.Invoke(ctx, "/containerd.services.introspection.v1.Introspection/Plugins", in, out, opts...) 
- if err != nil { - return nil, err +func (x *Plugin) GetPlatforms() []*types.Platform { + if x != nil { + return x.Platforms } - return out, nil + return nil } -func (c *introspectionClient) Server(ctx context.Context, in *types1.Empty, opts ...grpc.CallOption) (*ServerResponse, error) { - out := new(ServerResponse) - err := c.cc.Invoke(ctx, "/containerd.services.introspection.v1.Introspection/Server", in, out, opts...) - if err != nil { - return nil, err +func (x *Plugin) GetExports() map[string]string { + if x != nil { + return x.Exports } - return out, nil + return nil } -// IntrospectionServer is the server API for Introspection service. -type IntrospectionServer interface { - // Plugins returns a list of plugins in containerd. - // - // Clients can use this to detect features and capabilities when using - // containerd. - Plugins(context.Context, *PluginsRequest) (*PluginsResponse, error) - // Server returns information about the containerd server - Server(context.Context, *types1.Empty) (*ServerResponse, error) +func (x *Plugin) GetCapabilities() []string { + if x != nil { + return x.Capabilities + } + return nil } -// UnimplementedIntrospectionServer can be embedded to have forward compatible implementations. -type UnimplementedIntrospectionServer struct { +func (x *Plugin) GetInitErr() *status.Status { + if x != nil { + return x.InitErr + } + return nil } -func (*UnimplementedIntrospectionServer) Plugins(ctx context.Context, req *PluginsRequest) (*PluginsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Plugins not implemented") -} -func (*UnimplementedIntrospectionServer) Server(ctx context.Context, req *types1.Empty) (*ServerResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Server not implemented") -} +type PluginsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func RegisterIntrospectionServer(s *grpc.Server, srv IntrospectionServer) { - s.RegisterService(&_Introspection_serviceDesc, srv) + // Filters contains one or more filters using the syntax defined in the + // containerd filter package. + // + // The returned result will be those that match any of the provided + // filters. Expanded, plugins that match the following will be + // returned: + // + // filters[0] or filters[1] or ... or filters[n-1] or filters[n] + // + // If filters is zero-length or nil, all items will be returned. 
+ Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` } -func _Introspection_Plugins_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PluginsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IntrospectionServer).Plugins(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.introspection.v1.Introspection/Plugins", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IntrospectionServer).Plugins(ctx, req.(*PluginsRequest)) +func (x *PluginsRequest) Reset() { + *x = PluginsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return interceptor(ctx, in, info, handler) } -func _Introspection_Server_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(types1.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(IntrospectionServer).Server(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.introspection.v1.Introspection/Server", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(IntrospectionServer).Server(ctx, req.(*types1.Empty)) - } - return interceptor(ctx, in, info, handler) +func (x *PluginsRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var _Introspection_serviceDesc = grpc.ServiceDesc{ - ServiceName: "containerd.services.introspection.v1.Introspection", - HandlerType: (*IntrospectionServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Plugins", - Handler: _Introspection_Plugins_Handler, - }, - { - MethodName: "Server", - Handler: _Introspection_Server_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "github.com/containerd/containerd/api/services/introspection/v1/introspection.proto", -} +func (*PluginsRequest) ProtoMessage() {} -func (m *Plugin) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *PluginsRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *Plugin) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use PluginsRequest.ProtoReflect.Descriptor instead. 
+func (*PluginsRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDescGZIP(), []int{1} } -func (m *Plugin) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.InitErr != nil { - { - size, err := m.InitErr.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - if len(m.Capabilities) > 0 { - for iNdEx := len(m.Capabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Capabilities[iNdEx]) - copy(dAtA[i:], m.Capabilities[iNdEx]) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Capabilities[iNdEx]))) - i-- - dAtA[i] = 0x32 - } - } - if len(m.Exports) > 0 { - for k := range m.Exports { - v := m.Exports[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintIntrospection(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintIntrospection(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintIntrospection(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x2a - } - } - if len(m.Platforms) > 0 { - for iNdEx := len(m.Platforms) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Platforms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.Requires) > 0 { - for iNdEx := len(m.Requires) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Requires[iNdEx]) - copy(dAtA[i:], m.Requires[iNdEx]) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Requires[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0x12 - } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa +func (x *PluginsRequest) GetFilters() []string { + if x != nil { + return x.Filters } - return len(dAtA) - i, nil + return nil } -func (m *PluginsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} +type PluginsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *PluginsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + Plugins []*Plugin `protobuf:"bytes,1,rep,name=plugins,proto3" json:"plugins,omitempty"` } -func (m *PluginsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Filters) > 0 { - for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Filters[iNdEx]) - copy(dAtA[i:], m.Filters[iNdEx]) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Filters[iNdEx]))) - i-- - dAtA[i] = 0xa - } +func (x *PluginsResponse) Reset() { + *x = PluginsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[2] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return len(dAtA) - i, nil } -func (m *PluginsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (x *PluginsResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *PluginsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} +func (*PluginsResponse) ProtoMessage() {} -func (m *PluginsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Plugins) > 0 { - for iNdEx := len(m.Plugins) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Plugins[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintIntrospection(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa +func (x *PluginsResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } - return len(dAtA) - i, nil + return mi.MessageOf(x) } -func (m *ServerResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServerResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServerResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.UUID) > 0 { - i -= len(m.UUID) - copy(dAtA[i:], m.UUID) - i = encodeVarintIntrospection(dAtA, i, uint64(len(m.UUID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil +// Deprecated: Use PluginsResponse.ProtoReflect.Descriptor instead. 
+func (*PluginsResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDescGZIP(), []int{2} } -func encodeVarintIntrospection(dAtA []byte, offset int, v uint64) int { - offset -= sovIntrospection(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Plugin) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Type) - if l > 0 { - n += 1 + l + sovIntrospection(uint64(l)) - } - l = len(m.ID) - if l > 0 { - n += 1 + l + sovIntrospection(uint64(l)) - } - if len(m.Requires) > 0 { - for _, s := range m.Requires { - l = len(s) - n += 1 + l + sovIntrospection(uint64(l)) - } +func (x *PluginsResponse) GetPlugins() []*Plugin { + if x != nil { + return x.Plugins } - if len(m.Platforms) > 0 { - for _, e := range m.Platforms { - l = e.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - } - if len(m.Exports) > 0 { - for k, v := range m.Exports { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovIntrospection(uint64(len(k))) + 1 + len(v) + sovIntrospection(uint64(len(v))) - n += mapEntrySize + 1 + sovIntrospection(uint64(mapEntrySize)) - } - } - if len(m.Capabilities) > 0 { - for _, s := range m.Capabilities { - l = len(s) - n += 1 + l + sovIntrospection(uint64(l)) - } - } - if m.InitErr != nil { - l = m.InitErr.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n + return nil } -func (m *PluginsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Filters) > 0 { - for _, s := range m.Filters { - l = len(s) - n += 1 + l + sovIntrospection(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} +type ServerResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *PluginsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Plugins) > 0 { - for _, e := range m.Plugins { - l = e.Size() - n += 1 + l + sovIntrospection(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n + UUID string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + Pid uint64 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` + Pidns uint64 `protobuf:"varint,3,opt,name=pidns,proto3" json:"pidns,omitempty"` // PID namespace, such as 4026531836 } -func (m *ServerResponse) Size() (n int) { - if m == nil { - return 0 +func (x *ServerResponse) Reset() { + *x = ServerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - var l int - _ = l - l = len(m.UUID) - if l > 0 { - n += 1 + l + sovIntrospection(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n } -func sovIntrospection(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozIntrospection(x uint64) (n int) { - return sovIntrospection(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Plugin) String() string { - if this == nil { - return "nil" - } - repeatedStringForPlatforms := "[]Platform{" - for _, f := range this.Platforms { - repeatedStringForPlatforms += fmt.Sprintf("%v", f) + "," - } - 
repeatedStringForPlatforms += "}" - keysForExports := make([]string, 0, len(this.Exports)) - for k, _ := range this.Exports { - keysForExports = append(keysForExports, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForExports) - mapStringForExports := "map[string]string{" - for _, k := range keysForExports { - mapStringForExports += fmt.Sprintf("%v: %v,", k, this.Exports[k]) - } - mapStringForExports += "}" - s := strings.Join([]string{`&Plugin{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Requires:` + fmt.Sprintf("%v", this.Requires) + `,`, - `Platforms:` + repeatedStringForPlatforms + `,`, - `Exports:` + mapStringForExports + `,`, - `Capabilities:` + fmt.Sprintf("%v", this.Capabilities) + `,`, - `InitErr:` + strings.Replace(fmt.Sprintf("%v", this.InitErr), "Status", "rpc.Status", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *PluginsRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PluginsRequest{`, - `Filters:` + fmt.Sprintf("%v", this.Filters) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s +func (x *ServerResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (this *PluginsResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForPlugins := "[]Plugin{" - for _, f := range this.Plugins { - repeatedStringForPlugins += strings.Replace(strings.Replace(f.String(), "Plugin", "Plugin", 1), `&`, ``, 1) + "," - } - repeatedStringForPlugins += "}" - s := strings.Join([]string{`&PluginsResponse{`, - `Plugins:` + repeatedStringForPlugins + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ServerResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServerResponse{`, - `UUID:` + fmt.Sprintf("%v", this.UUID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringIntrospection(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Plugin) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Plugin: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Plugin: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return 
ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Requires", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Requires = append(m.Requires, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Platforms", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Platforms = append(m.Platforms, types.Platform{}) - if err := m.Platforms[len(m.Platforms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exports", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Exports == nil { - m.Exports = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthIntrospection - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthIntrospection - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthIntrospection - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthIntrospection - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Exports[mapkey] = mapvalue - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Capabilities", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Capabilities = append(m.Capabilities, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitErr", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.InitErr == nil { - m.InitErr = &rpc.Status{} - } - if err := m.InitErr.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PluginsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } +func (*ServerResponse) ProtoMessage() {} - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PluginsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } +func (x *ServerResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Plugins", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Plugins = append(m.Plugins, Plugin{}) - if err := m.Plugins[len(m.Plugins)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF + return ms } - return nil + return mi.MessageOf(x) } -func (m *ServerResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServerResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServerResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UUID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowIntrospection - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthIntrospection - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthIntrospection - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UUID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipIntrospection(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthIntrospection - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipIntrospection(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowIntrospection - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowIntrospection - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowIntrospection - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthIntrospection - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupIntrospection - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthIntrospection - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF +// Deprecated: Use ServerResponse.ProtoReflect.Descriptor instead. 
+func (*ServerResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDescGZIP(), []int{3} +} + +func (x *ServerResponse) GetUUID() string { + if x != nil { + return x.UUID + } + return "" +} + +func (x *ServerResponse) GetPid() uint64 { + if x != nil { + return x.Pid + } + return 0 +} + +func (x *ServerResponse) GetPidns() uint64 { + if x != nil { + return x.Pidns + } + return 0 +} + +var File_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDesc = []byte{ + 0x0a, 0x52, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, + 0x2f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x24, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, + 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x1a, 0x39, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, + 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe6, 0x02, 0x0a, 0x06, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, + 0x72, 0x6d, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x6c, 0x61, + 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x09, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x73, + 0x12, 0x53, 0x0a, 0x07, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x39, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, + 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x78, + 0x70, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, + 
0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x70, + 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x2d, 0x0a, 0x08, 0x69, 0x6e, 0x69, + 0x74, 0x5f, 0x65, 0x72, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x07, 0x69, 0x6e, 0x69, 0x74, 0x45, 0x72, 0x72, 0x1a, 0x3a, 0x0a, 0x0c, 0x45, 0x78, 0x70, 0x6f, + 0x72, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x2a, 0x0a, 0x0e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x22, 0x59, 0x0a, 0x0f, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, 0x07, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72, 0x6f, + 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x52, 0x07, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x22, 0x4c, 0x0a, 0x0e, 0x53, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, + 0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, + 0x70, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x69, 0x64, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x05, 0x70, 0x69, 0x64, 0x6e, 0x73, 0x32, 0xdf, 0x01, 0x0a, 0x0d, 0x49, 0x6e, + 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x76, 0x0a, 0x07, 0x50, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x12, 0x34, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, + 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x56, 0x0a, 0x06, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x16, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x34, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x69, 0x6e, 0x74, 0x72, + 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x4e, 0x5a, 0x4c, 0x67, + 0x69, 0x74, 0x68, 0x75, 
0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, + 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x69, 0x6e, 0x74, + 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x3b, 0x69, 0x6e, + 0x74, 0x72, 0x6f, 0x73, 0x70, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( - ErrInvalidLengthIntrospection = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowIntrospection = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupIntrospection = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDescData = file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_goTypes = []interface{}{ + (*Plugin)(nil), // 0: containerd.services.introspection.v1.Plugin + (*PluginsRequest)(nil), // 1: containerd.services.introspection.v1.PluginsRequest + (*PluginsResponse)(nil), // 2: containerd.services.introspection.v1.PluginsResponse + (*ServerResponse)(nil), // 3: containerd.services.introspection.v1.ServerResponse + nil, // 4: containerd.services.introspection.v1.Plugin.ExportsEntry + (*types.Platform)(nil), // 5: containerd.types.Platform + (*status.Status)(nil), // 6: google.rpc.Status + (*emptypb.Empty)(nil), // 7: google.protobuf.Empty +} +var file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_depIdxs = []int32{ + 5, // 0: containerd.services.introspection.v1.Plugin.platforms:type_name -> containerd.types.Platform + 4, // 1: containerd.services.introspection.v1.Plugin.exports:type_name -> containerd.services.introspection.v1.Plugin.ExportsEntry + 6, // 2: containerd.services.introspection.v1.Plugin.init_err:type_name -> google.rpc.Status + 0, // 3: containerd.services.introspection.v1.PluginsResponse.plugins:type_name -> containerd.services.introspection.v1.Plugin + 1, // 4: containerd.services.introspection.v1.Introspection.Plugins:input_type -> containerd.services.introspection.v1.PluginsRequest + 7, // 5: containerd.services.introspection.v1.Introspection.Server:input_type -> google.protobuf.Empty + 2, // 6: containerd.services.introspection.v1.Introspection.Plugins:output_type -> containerd.services.introspection.v1.PluginsResponse + 3, // 7: containerd.services.introspection.v1.Introspection.Server:output_type -> containerd.services.introspection.v1.ServerResponse + 6, // [6:8] 
is the sub-list for method output_type + 4, // [4:6] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { + file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_init() +} +func file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_init() { + if File_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Plugin); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PluginsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PluginsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ServerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDesc, + NumEnums: 0, + NumMessages: 5, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto = out.File + file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_rawDesc = nil + file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_goTypes = nil + file_github_com_containerd_containerd_api_services_introspection_v1_introspection_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto index 65a8bc21b6cd1..38864f1ec6d1f 100644 --- a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto +++ b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto @@ -21,7 +21,6 @@ package containerd.services.introspection.v1; import "github.com/containerd/containerd/api/types/platform.proto"; import 
"google/rpc/status.proto"; import "google/protobuf/empty.proto"; -import weak "gogoproto/gogo.proto"; option go_package = "github.com/containerd/containerd/api/services/introspection/v1;introspection"; @@ -57,7 +56,7 @@ message Plugin { // // If the plugin prefers certain platforms over others, they should be // listed from most to least preferred. - repeated types.Platform platforms = 4 [(gogoproto.nullable) = false]; + repeated types.Platform platforms = 4; // Exports allows plugins to provide values about state or configuration to // interested parties. @@ -89,16 +88,18 @@ message PluginsRequest { // filters. Expanded, plugins that match the following will be // returned: // - // filters[0] or filters[1] or ... or filters[n-1] or filters[n] + // filters[0] or filters[1] or ... or filters[n-1] or filters[n] // // If filters is zero-length or nil, all items will be returned. repeated string filters = 1; } message PluginsResponse { - repeated Plugin plugins = 1 [(gogoproto.nullable) = false]; + repeated Plugin plugins = 1; } message ServerResponse { - string uuid = 1 [(gogoproto.customname) = "UUID"]; + string uuid = 1; + uint64 pid = 2; + uint64 pidns = 3; // PID namespace, such as 4026531836 } diff --git a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection_grpc.pb.go new file mode 100644 index 0000000000000..c2cf80765ccb8 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection_grpc.pb.go @@ -0,0 +1,152 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/introspection/v1/introspection.proto + +package introspection + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// IntrospectionClient is the client API for Introspection service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type IntrospectionClient interface { + // Plugins returns a list of plugins in containerd. + // + // Clients can use this to detect features and capabilities when using + // containerd. + Plugins(ctx context.Context, in *PluginsRequest, opts ...grpc.CallOption) (*PluginsResponse, error) + // Server returns information about the containerd server + Server(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ServerResponse, error) +} + +type introspectionClient struct { + cc grpc.ClientConnInterface +} + +func NewIntrospectionClient(cc grpc.ClientConnInterface) IntrospectionClient { + return &introspectionClient{cc} +} + +func (c *introspectionClient) Plugins(ctx context.Context, in *PluginsRequest, opts ...grpc.CallOption) (*PluginsResponse, error) { + out := new(PluginsResponse) + err := c.cc.Invoke(ctx, "/containerd.services.introspection.v1.Introspection/Plugins", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *introspectionClient) Server(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ServerResponse, error) { + out := new(ServerResponse) + err := c.cc.Invoke(ctx, "/containerd.services.introspection.v1.Introspection/Server", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// IntrospectionServer is the server API for Introspection service. +// All implementations must embed UnimplementedIntrospectionServer +// for forward compatibility +type IntrospectionServer interface { + // Plugins returns a list of plugins in containerd. + // + // Clients can use this to detect features and capabilities when using + // containerd. + Plugins(context.Context, *PluginsRequest) (*PluginsResponse, error) + // Server returns information about the containerd server + Server(context.Context, *emptypb.Empty) (*ServerResponse, error) + mustEmbedUnimplementedIntrospectionServer() +} + +// UnimplementedIntrospectionServer must be embedded to have forward compatible implementations. +type UnimplementedIntrospectionServer struct { +} + +func (UnimplementedIntrospectionServer) Plugins(context.Context, *PluginsRequest) (*PluginsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Plugins not implemented") +} +func (UnimplementedIntrospectionServer) Server(context.Context, *emptypb.Empty) (*ServerResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Server not implemented") +} +func (UnimplementedIntrospectionServer) mustEmbedUnimplementedIntrospectionServer() {} + +// UnsafeIntrospectionServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to IntrospectionServer will +// result in compilation errors. +type UnsafeIntrospectionServer interface { + mustEmbedUnimplementedIntrospectionServer() +} + +func RegisterIntrospectionServer(s grpc.ServiceRegistrar, srv IntrospectionServer) { + s.RegisterService(&Introspection_ServiceDesc, srv) +} + +func _Introspection_Plugins_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PluginsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IntrospectionServer).Plugins(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.introspection.v1.Introspection/Plugins", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IntrospectionServer).Plugins(ctx, req.(*PluginsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Introspection_Server_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IntrospectionServer).Server(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.introspection.v1.Introspection/Server", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IntrospectionServer).Server(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +// Introspection_ServiceDesc is the grpc.ServiceDesc for Introspection service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Introspection_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.introspection.v1.Introspection", + HandlerType: (*IntrospectionServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Plugins", + Handler: _Introspection_Plugins_Handler, + }, + { + MethodName: "Server", + Handler: _Introspection_Server_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/containerd/containerd/api/services/introspection/v1/introspection.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go index 5e7cab71f1312..2a66f0f8b86b3 100644 --- a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.pb.go @@ -1,3108 +1,961 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/services/leases/v1/leases.proto package leases import ( - context "context" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types "github.com/gogo/protobuf/types" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - strings "strings" - time "time" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // Lease is an object which retains resources while it exists. 
type Lease struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - CreatedAt time.Time `protobuf:"bytes,2,opt,name=created_at,json=createdAt,proto3,stdtime" json:"created_at"` - Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Lease) Reset() { + *x = Lease{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Lease) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Lease) Reset() { *m = Lease{} } func (*Lease) ProtoMessage() {} -func (*Lease) Descriptor() ([]byte, []int) { - return fileDescriptor_fefd70dfe8d93cbf, []int{0} -} -func (m *Lease) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Lease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Lease.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *Lease) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *Lease) XXX_Merge(src proto.Message) { - xxx_messageInfo_Lease.Merge(m, src) + +// Deprecated: Use Lease.ProtoReflect.Descriptor instead. +func (*Lease) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescGZIP(), []int{0} } -func (m *Lease) XXX_Size() int { - return m.Size() + +func (x *Lease) GetID() string { + if x != nil { + return x.ID + } + return "" } -func (m *Lease) XXX_DiscardUnknown() { - xxx_messageInfo_Lease.DiscardUnknown(m) + +func (x *Lease) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil } -var xxx_messageInfo_Lease proto.InternalMessageInfo +func (x *Lease) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} type CreateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // ID is used to identity the lease, when the id is not set the service // generates a random identifier for the lease. 
- ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *CreateRequest) Reset() { + *x = CreateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *CreateRequest) Reset() { *m = CreateRequest{} } func (*CreateRequest) ProtoMessage() {} -func (*CreateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_fefd70dfe8d93cbf, []int{1} -} -func (m *CreateRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CreateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CreateRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *CreateRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *CreateRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateRequest.Merge(m, src) -} -func (m *CreateRequest) XXX_Size() int { - return m.Size() + +// Deprecated: Use CreateRequest.ProtoReflect.Descriptor instead. 
+func (*CreateRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescGZIP(), []int{1} } -func (m *CreateRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CreateRequest.DiscardUnknown(m) + +func (x *CreateRequest) GetID() string { + if x != nil { + return x.ID + } + return "" } -var xxx_messageInfo_CreateRequest proto.InternalMessageInfo +func (x *CreateRequest) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} type CreateResponse struct { - Lease *Lease `protobuf:"bytes,1,opt,name=lease,proto3" json:"lease,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Lease *Lease `protobuf:"bytes,1,opt,name=lease,proto3" json:"lease,omitempty"` } -func (m *CreateResponse) Reset() { *m = CreateResponse{} } -func (*CreateResponse) ProtoMessage() {} -func (*CreateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_fefd70dfe8d93cbf, []int{2} -} -func (m *CreateResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CreateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CreateResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *CreateResponse) Reset() { + *x = CreateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *CreateResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateResponse.Merge(m, src) + +func (x *CreateResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *CreateResponse) XXX_Size() int { - return m.Size() + +func (*CreateResponse) ProtoMessage() {} + +func (x *CreateResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *CreateResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CreateResponse.DiscardUnknown(m) + +// Deprecated: Use CreateResponse.ProtoReflect.Descriptor instead. 
+func (*CreateResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescGZIP(), []int{2} } -var xxx_messageInfo_CreateResponse proto.InternalMessageInfo +func (x *CreateResponse) GetLease() *Lease { + if x != nil { + return x.Lease + } + return nil +} type DeleteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // Sync indicates that the delete and cleanup should be done // synchronously before returning to the caller // // Default is false - Sync bool `protobuf:"varint,2,opt,name=sync,proto3" json:"sync,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Sync bool `protobuf:"varint,2,opt,name=sync,proto3" json:"sync,omitempty"` } -func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } -func (*DeleteRequest) ProtoMessage() {} -func (*DeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_fefd70dfe8d93cbf, []int{3} -} -func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *DeleteRequest) Reset() { + *x = DeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *DeleteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteRequest.Merge(m, src) -} -func (m *DeleteRequest) XXX_Size() int { - return m.Size() -} -func (m *DeleteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteRequest.DiscardUnknown(m) -} -var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo - -type ListRequest struct { - Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *DeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ListRequest) Reset() { *m = ListRequest{} } -func (*ListRequest) ProtoMessage() {} -func (*ListRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_fefd70dfe8d93cbf, []int{4} -} -func (m *ListRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*DeleteRequest) ProtoMessage() {} + +func (x *DeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *ListRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListRequest.Merge(m, src) -} -func (m *ListRequest) XXX_Size() int 
{ - return m.Size() -} -func (m *ListRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListRequest proto.InternalMessageInfo -type ListResponse struct { - Leases []*Lease `protobuf:"bytes,1,rep,name=leases,proto3" json:"leases,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +// Deprecated: Use DeleteRequest.ProtoReflect.Descriptor instead. +func (*DeleteRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescGZIP(), []int{3} } -func (m *ListResponse) Reset() { *m = ListResponse{} } -func (*ListResponse) ProtoMessage() {} -func (*ListResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_fefd70dfe8d93cbf, []int{5} -} -func (m *ListResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *DeleteRequest) GetID() string { + if x != nil { + return x.ID } + return "" } -func (m *ListResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListResponse.Merge(m, src) -} -func (m *ListResponse) XXX_Size() int { - return m.Size() -} -func (m *ListResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListResponse.DiscardUnknown(m) + +func (x *DeleteRequest) GetSync() bool { + if x != nil { + return x.Sync + } + return false } -var xxx_messageInfo_ListResponse proto.InternalMessageInfo +type ListRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -type Resource struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - // For snapshotter resource, there are many snapshotter types here, like - // overlayfs, devmapper etc. The type will be formatted with type, - // like "snapshotter/overlayfs". 
- Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` } -func (m *Resource) Reset() { *m = Resource{} } -func (*Resource) ProtoMessage() {} -func (*Resource) Descriptor() ([]byte, []int) { - return fileDescriptor_fefd70dfe8d93cbf, []int{6} -} -func (m *Resource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Resource.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ListRequest) Reset() { + *x = ListRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *Resource) XXX_Merge(src proto.Message) { - xxx_messageInfo_Resource.Merge(m, src) -} -func (m *Resource) XXX_Size() int { - return m.Size() -} -func (m *Resource) XXX_DiscardUnknown() { - xxx_messageInfo_Resource.DiscardUnknown(m) -} - -var xxx_messageInfo_Resource proto.InternalMessageInfo -type AddResourceRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Resource Resource `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *ListRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *AddResourceRequest) Reset() { *m = AddResourceRequest{} } -func (*AddResourceRequest) ProtoMessage() {} -func (*AddResourceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_fefd70dfe8d93cbf, []int{7} -} -func (m *AddResourceRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AddResourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AddResourceRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*ListRequest) ProtoMessage() {} + +func (x *ListRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *AddResourceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AddResourceRequest.Merge(m, src) -} -func (m *AddResourceRequest) XXX_Size() int { - return m.Size() -} -func (m *AddResourceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AddResourceRequest.DiscardUnknown(m) + return mi.MessageOf(x) } -var xxx_messageInfo_AddResourceRequest proto.InternalMessageInfo - -type DeleteResourceRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Resource Resource `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +// Deprecated: Use ListRequest.ProtoReflect.Descriptor instead. 
+func (*ListRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescGZIP(), []int{4} } -func (m *DeleteResourceRequest) Reset() { *m = DeleteResourceRequest{} } -func (*DeleteResourceRequest) ProtoMessage() {} -func (*DeleteResourceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_fefd70dfe8d93cbf, []int{8} -} -func (m *DeleteResourceRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteResourceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteResourceRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ListRequest) GetFilters() []string { + if x != nil { + return x.Filters } -} -func (m *DeleteResourceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteResourceRequest.Merge(m, src) -} -func (m *DeleteResourceRequest) XXX_Size() int { - return m.Size() -} -func (m *DeleteResourceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteResourceRequest.DiscardUnknown(m) + return nil } -var xxx_messageInfo_DeleteResourceRequest proto.InternalMessageInfo +type ListResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -type ListResourcesRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Leases []*Lease `protobuf:"bytes,1,rep,name=leases,proto3" json:"leases,omitempty"` } -func (m *ListResourcesRequest) Reset() { *m = ListResourcesRequest{} } -func (*ListResourcesRequest) ProtoMessage() {} -func (*ListResourcesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_fefd70dfe8d93cbf, []int{9} -} -func (m *ListResourcesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListResourcesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListResourcesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ListResponse) Reset() { + *x = ListResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ListResourcesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListResourcesRequest.Merge(m, src) -} -func (m *ListResourcesRequest) XXX_Size() int { - return m.Size() -} -func (m *ListResourcesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListResourcesRequest.DiscardUnknown(m) -} -var xxx_messageInfo_ListResourcesRequest proto.InternalMessageInfo - -type ListResourcesResponse struct { - Resources []Resource `protobuf:"bytes,1,rep,name=resources,proto3" json:"resources"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *ListResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ListResourcesResponse) Reset() { *m = ListResourcesResponse{} } -func (*ListResourcesResponse) ProtoMessage() {} -func (*ListResourcesResponse) Descriptor() ([]byte, []int) { - return 
fileDescriptor_fefd70dfe8d93cbf, []int{10} -} -func (m *ListResourcesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListResourcesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListResourcesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*ListResponse) ProtoMessage() {} + +func (x *ListResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil - } -} -func (m *ListResourcesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListResourcesResponse.Merge(m, src) -} -func (m *ListResourcesResponse) XXX_Size() int { - return m.Size() -} -func (m *ListResourcesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListResourcesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ListResourcesResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Lease)(nil), "containerd.services.leases.v1.Lease") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.leases.v1.Lease.LabelsEntry") - proto.RegisterType((*CreateRequest)(nil), "containerd.services.leases.v1.CreateRequest") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.leases.v1.CreateRequest.LabelsEntry") - proto.RegisterType((*CreateResponse)(nil), "containerd.services.leases.v1.CreateResponse") - proto.RegisterType((*DeleteRequest)(nil), "containerd.services.leases.v1.DeleteRequest") - proto.RegisterType((*ListRequest)(nil), "containerd.services.leases.v1.ListRequest") - proto.RegisterType((*ListResponse)(nil), "containerd.services.leases.v1.ListResponse") - proto.RegisterType((*Resource)(nil), "containerd.services.leases.v1.Resource") - proto.RegisterType((*AddResourceRequest)(nil), "containerd.services.leases.v1.AddResourceRequest") - proto.RegisterType((*DeleteResourceRequest)(nil), "containerd.services.leases.v1.DeleteResourceRequest") - proto.RegisterType((*ListResourcesRequest)(nil), "containerd.services.leases.v1.ListResourcesRequest") - proto.RegisterType((*ListResourcesResponse)(nil), "containerd.services.leases.v1.ListResourcesResponse") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/leases/v1/leases.proto", fileDescriptor_fefd70dfe8d93cbf) -} - -var fileDescriptor_fefd70dfe8d93cbf = []byte{ - // 644 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xce, 0x26, 0xa9, 0x49, 0x26, 0xb4, 0x42, 0xab, 0xb6, 0x8a, 0x8c, 0x48, 0x22, 0x0b, 0xa9, - 0x11, 0x3f, 0x36, 0x4d, 0x2b, 0x54, 0x5a, 0x84, 0xd4, 0xb4, 0x95, 0xa8, 0x88, 0x10, 0xb2, 0x38, - 0x54, 0x1c, 0xa8, 0x1c, 0x7b, 0x1b, 0x2c, 0x9c, 0xd8, 0x78, 0x37, 0x41, 0xe9, 0x89, 0x47, 0xe0, - 0x61, 0x78, 0x88, 0x1e, 0x39, 0x21, 0x4e, 0x85, 0xe6, 0xc6, 0x5b, 0x20, 0xef, 0x0f, 0x6d, 0x5a, - 0xb5, 0x76, 0x11, 0xe2, 0x36, 0x1b, 0x7f, 0xdf, 0xcc, 0x37, 0x33, 0xdf, 0x6e, 0x60, 0xbb, 0xe7, - 0xb3, 0x77, 0xc3, 0xae, 0xe9, 0x86, 0x7d, 0xcb, 0x0d, 0x07, 0xcc, 0xf1, 0x07, 0x24, 0xf6, 0xce, - 0x86, 0x4e, 0xe4, 0x5b, 0x94, 0xc4, 0x23, 0xdf, 0x25, 0xd4, 0x0a, 0x88, 0x43, 0x09, 0xb5, 0x46, - 0xcb, 0x32, 0x32, 0xa3, 0x38, 0x64, 0x21, 0xbe, 0x73, 0x8a, 0x37, 
0x15, 0xd6, 0x94, 0x88, 0xd1, - 0xb2, 0x3e, 0xdf, 0x0b, 0x7b, 0x21, 0x47, 0x5a, 0x49, 0x24, 0x48, 0xfa, 0xed, 0x5e, 0x18, 0xf6, - 0x02, 0x62, 0xf1, 0x53, 0x77, 0x78, 0x60, 0x91, 0x7e, 0xc4, 0xc6, 0xf2, 0x63, 0xfd, 0xfc, 0x47, - 0xe6, 0xf7, 0x09, 0x65, 0x4e, 0x3f, 0x12, 0x00, 0xe3, 0x17, 0x82, 0x99, 0x4e, 0x52, 0x01, 0x2f, - 0x42, 0xde, 0xf7, 0xaa, 0xa8, 0x81, 0x9a, 0xe5, 0xb6, 0x36, 0x39, 0xae, 0xe7, 0x77, 0xb7, 0xed, - 0xbc, 0xef, 0xe1, 0x2d, 0x00, 0x37, 0x26, 0x0e, 0x23, 0xde, 0xbe, 0xc3, 0xaa, 0xf9, 0x06, 0x6a, - 0x56, 0x5a, 0xba, 0x29, 0xf2, 0x9a, 0x2a, 0xaf, 0xf9, 0x5a, 0xe5, 0x6d, 0x97, 0x8e, 0x8e, 0xeb, - 0xb9, 0xcf, 0x3f, 0xea, 0xc8, 0x2e, 0x4b, 0xde, 0x26, 0xc3, 0xcf, 0x41, 0x0b, 0x9c, 0x2e, 0x09, - 0x68, 0xb5, 0xd0, 0x28, 0x34, 0x2b, 0xad, 0x47, 0xe6, 0x95, 0xad, 0x9a, 0x5c, 0x92, 0xd9, 0xe1, - 0x94, 0x9d, 0x01, 0x8b, 0xc7, 0xb6, 0xe4, 0xeb, 0x4f, 0xa0, 0x72, 0xe6, 0x67, 0x7c, 0x0b, 0x0a, - 0xef, 0xc9, 0x58, 0xc8, 0xb6, 0x93, 0x10, 0xcf, 0xc3, 0xcc, 0xc8, 0x09, 0x86, 0x84, 0x4b, 0x2d, - 0xdb, 0xe2, 0xb0, 0x9e, 0x5f, 0x43, 0xc6, 0x17, 0x04, 0xb3, 0x5b, 0x5c, 0x92, 0x4d, 0x3e, 0x0c, - 0x09, 0x65, 0x97, 0xf6, 0xfc, 0xea, 0x9c, 0xdc, 0xb5, 0x14, 0xb9, 0x53, 0x59, 0xff, 0xb5, 0xec, - 0x0e, 0xcc, 0xa9, 0xfc, 0x34, 0x0a, 0x07, 0x94, 0xe0, 0x75, 0x98, 0xe1, 0xb5, 0x39, 0xbf, 0xd2, - 0xba, 0x9b, 0x65, 0x98, 0xb6, 0xa0, 0x18, 0x1b, 0x30, 0xbb, 0x4d, 0x02, 0x92, 0x3e, 0x03, 0x0c, - 0x45, 0x3a, 0x1e, 0xb8, 0x5c, 0x4f, 0xc9, 0xe6, 0xb1, 0xb1, 0x04, 0x95, 0x8e, 0x4f, 0x99, 0xa2, - 0x56, 0xe1, 0xc6, 0x81, 0x1f, 0x30, 0x12, 0xd3, 0x2a, 0x6a, 0x14, 0x9a, 0x65, 0x5b, 0x1d, 0x8d, - 0x0e, 0xdc, 0x14, 0x40, 0xa9, 0xf8, 0x29, 0x68, 0x42, 0x0f, 0x07, 0x66, 0x95, 0x2c, 0x39, 0xc6, - 0x63, 0x28, 0xd9, 0x84, 0x86, 0xc3, 0xd8, 0x25, 0x57, 0xc9, 0x65, 0xe3, 0x48, 0x8d, 0x8f, 0xc7, - 0xc6, 0x47, 0xc0, 0x9b, 0x9e, 0xa7, 0xa8, 0x69, 0x0d, 0xef, 0x42, 0x29, 0x96, 0x50, 0x69, 0xf3, - 0xa5, 0x14, 0x95, 0x2a, 0x73, 0xbb, 0x98, 0x78, 0xde, 0xfe, 0x43, 0x37, 0x0e, 0x61, 0x41, 0x0d, - 0xf9, 0xbf, 0xd7, 0x36, 0x61, 0x5e, 0x8e, 0x9e, 0x9f, 0x69, 0x4a, 0x69, 0xc3, 0x83, 0x85, 0x73, - 0x78, 0xb9, 0xb3, 0x17, 0x50, 0x56, 0x49, 0xd5, 0xda, 0xae, 0x29, 0xea, 0x94, 0xdf, 0xfa, 0x56, - 0x04, 0x8d, 0x2f, 0x95, 0x62, 0x02, 0x9a, 0xf0, 0x33, 0x7e, 0x70, 0x9d, 0x6b, 0xa5, 0x3f, 0xcc, - 0x88, 0x96, 0xf2, 0x5f, 0x82, 0x26, 0x76, 0x90, 0x5a, 0x66, 0xea, 0x3e, 0xe8, 0x8b, 0x17, 0xde, - 0xb6, 0x9d, 0xe4, 0x41, 0xc5, 0xfb, 0x50, 0x4c, 0xe6, 0x84, 0xef, 0xa5, 0x59, 0xf7, 0xf4, 0x82, - 0xe8, 0xf7, 0x33, 0x61, 0xa5, 0xe0, 0x3d, 0xa8, 0x9c, 0x71, 0x2b, 0x5e, 0x4e, 0xe1, 0x5e, 0x74, - 0xf6, 0xa5, 0xd2, 0xdf, 0xc2, 0xdc, 0xb4, 0x1d, 0xf1, 0x6a, 0xc6, 0x91, 0x64, 0xcb, 0x7f, 0x08, - 0xb3, 0x53, 0x16, 0xc2, 0x2b, 0xd9, 0xfa, 0x9e, 0x32, 0xa8, 0xbe, 0x7a, 0x3d, 0x92, 0x98, 0x5a, - 0x7b, 0xef, 0xe8, 0xa4, 0x96, 0xfb, 0x7e, 0x52, 0xcb, 0x7d, 0x9a, 0xd4, 0xd0, 0xd1, 0xa4, 0x86, - 0xbe, 0x4e, 0x6a, 0xe8, 0xe7, 0xa4, 0x86, 0xde, 0x3c, 0xfb, 0xcb, 0xff, 0xe4, 0x0d, 0x11, 0xed, - 0xe5, 0xba, 0x1a, 0xef, 0x73, 0xe5, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0d, 0xfe, 0x39, 0x67, - 0xde, 0x07, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// LeasesClient is the client API for Leases service. 
-// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type LeasesClient interface { - // Create creates a new lease for managing changes to metadata. A lease - // can be used to protect objects from being removed. - Create(ctx context.Context, in *CreateRequest, opts ...grpc.CallOption) (*CreateResponse, error) - // Delete deletes the lease and makes any unreferenced objects created - // during the lease eligible for garbage collection if not referenced - // or retained by other resources during the lease. - Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*types.Empty, error) - // List lists all active leases, returning the full list of - // leases and optionally including the referenced resources. - List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error) - // AddResource references the resource by the provided lease. - AddResource(ctx context.Context, in *AddResourceRequest, opts ...grpc.CallOption) (*types.Empty, error) - // DeleteResource dereferences the resource by the provided lease. - DeleteResource(ctx context.Context, in *DeleteResourceRequest, opts ...grpc.CallOption) (*types.Empty, error) - // ListResources lists all the resources referenced by the lease. - ListResources(ctx context.Context, in *ListResourcesRequest, opts ...grpc.CallOption) (*ListResourcesResponse, error) -} - -type leasesClient struct { - cc *grpc.ClientConn -} - -func NewLeasesClient(cc *grpc.ClientConn) LeasesClient { - return &leasesClient{cc} -} - -func (c *leasesClient) Create(ctx context.Context, in *CreateRequest, opts ...grpc.CallOption) (*CreateResponse, error) { - out := new(CreateResponse) - err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/Create", in, out, opts...) - if err != nil { - return nil, err + return ms } - return out, nil + return mi.MessageOf(x) } -func (c *leasesClient) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*types.Empty, error) { - out := new(types.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/Delete", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +// Deprecated: Use ListResponse.ProtoReflect.Descriptor instead. +func (*ListResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescGZIP(), []int{5} } -func (c *leasesClient) List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error) { - out := new(ListResponse) - err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/List", in, out, opts...) - if err != nil { - return nil, err +func (x *ListResponse) GetLeases() []*Lease { + if x != nil { + return x.Leases } - return out, nil + return nil } -func (c *leasesClient) AddResource(ctx context.Context, in *AddResourceRequest, opts ...grpc.CallOption) (*types.Empty, error) { - out := new(types.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/AddResource", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} +type Resource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (c *leasesClient) DeleteResource(ctx context.Context, in *DeleteResourceRequest, opts ...grpc.CallOption) (*types.Empty, error) { - out := new(types.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/DeleteResource", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // For snapshotter resource, there are many snapshotter types here, like + // overlayfs, devmapper etc. The type will be formatted with type, + // like "snapshotter/overlayfs". + Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` } -func (c *leasesClient) ListResources(ctx context.Context, in *ListResourcesRequest, opts ...grpc.CallOption) (*ListResourcesResponse, error) { - out := new(ListResourcesResponse) - err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/ListResources", in, out, opts...) - if err != nil { - return nil, err +func (x *Resource) Reset() { + *x = Resource{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return out, nil } -// LeasesServer is the server API for Leases service. -type LeasesServer interface { - // Create creates a new lease for managing changes to metadata. A lease - // can be used to protect objects from being removed. - Create(context.Context, *CreateRequest) (*CreateResponse, error) - // Delete deletes the lease and makes any unreferenced objects created - // during the lease eligible for garbage collection if not referenced - // or retained by other resources during the lease. - Delete(context.Context, *DeleteRequest) (*types.Empty, error) - // List lists all active leases, returning the full list of - // leases and optionally including the referenced resources. - List(context.Context, *ListRequest) (*ListResponse, error) - // AddResource references the resource by the provided lease. - AddResource(context.Context, *AddResourceRequest) (*types.Empty, error) - // DeleteResource dereferences the resource by the provided lease. - DeleteResource(context.Context, *DeleteResourceRequest) (*types.Empty, error) - // ListResources lists all the resources referenced by the lease. - ListResources(context.Context, *ListResourcesRequest) (*ListResourcesResponse, error) +func (x *Resource) String() string { + return protoimpl.X.MessageStringOf(x) } -// UnimplementedLeasesServer can be embedded to have forward compatible implementations. 
-type UnimplementedLeasesServer struct { -} +func (*Resource) ProtoMessage() {} -func (*UnimplementedLeasesServer) Create(ctx context.Context, req *CreateRequest) (*CreateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") -} -func (*UnimplementedLeasesServer) Delete(ctx context.Context, req *DeleteRequest) (*types.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") -} -func (*UnimplementedLeasesServer) List(ctx context.Context, req *ListRequest) (*ListResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method List not implemented") -} -func (*UnimplementedLeasesServer) AddResource(ctx context.Context, req *AddResourceRequest) (*types.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method AddResource not implemented") -} -func (*UnimplementedLeasesServer) DeleteResource(ctx context.Context, req *DeleteResourceRequest) (*types.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteResource not implemented") -} -func (*UnimplementedLeasesServer) ListResources(ctx context.Context, req *ListResourcesRequest) (*ListResourcesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListResources not implemented") +func (x *Resource) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func RegisterLeasesServer(s *grpc.Server, srv LeasesServer) { - s.RegisterService(&_Leases_serviceDesc, srv) +// Deprecated: Use Resource.ProtoReflect.Descriptor instead. 
+func (*Resource) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescGZIP(), []int{6} } -func _Leases_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LeasesServer).Create(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.leases.v1.Leases/Create", +func (x *Resource) GetID() string { + if x != nil { + return x.ID } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LeasesServer).Create(ctx, req.(*CreateRequest)) - } - return interceptor(ctx, in, info, handler) + return "" } -func _Leases_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LeasesServer).Delete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.leases.v1.Leases/Delete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LeasesServer).Delete(ctx, req.(*DeleteRequest)) +func (x *Resource) GetType() string { + if x != nil { + return x.Type } - return interceptor(ctx, in, info, handler) + return "" } -func _Leases_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LeasesServer).List(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.leases.v1.Leases/List", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LeasesServer).List(ctx, req.(*ListRequest)) - } - return interceptor(ctx, in, info, handler) -} +type AddResourceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func _Leases_AddResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AddResourceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LeasesServer).AddResource(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.leases.v1.Leases/AddResource", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LeasesServer).AddResource(ctx, req.(*AddResourceRequest)) - } - return interceptor(ctx, in, info, handler) + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Resource *Resource `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"` } -func _Leases_DeleteResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteResourceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LeasesServer).DeleteResource(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: 
"/containerd.services.leases.v1.Leases/DeleteResource", +func (x *AddResourceRequest) Reset() { + *x = AddResourceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LeasesServer).DeleteResource(ctx, req.(*DeleteResourceRequest)) - } - return interceptor(ctx, in, info, handler) } -func _Leases_ListResources_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListResourcesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LeasesServer).ListResources(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.leases.v1.Leases/ListResources", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LeasesServer).ListResources(ctx, req.(*ListResourcesRequest)) - } - return interceptor(ctx, in, info, handler) +func (x *AddResourceRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var _Leases_serviceDesc = grpc.ServiceDesc{ - ServiceName: "containerd.services.leases.v1.Leases", - HandlerType: (*LeasesServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Create", - Handler: _Leases_Create_Handler, - }, - { - MethodName: "Delete", - Handler: _Leases_Delete_Handler, - }, - { - MethodName: "List", - Handler: _Leases_List_Handler, - }, - { - MethodName: "AddResource", - Handler: _Leases_AddResource_Handler, - }, - { - MethodName: "DeleteResource", - Handler: _Leases_DeleteResource_Handler, - }, - { - MethodName: "ListResources", - Handler: _Leases_ListResources_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "github.com/containerd/containerd/api/services/leases/v1/leases.proto", -} +func (*AddResourceRequest) ProtoMessage() {} -func (m *Lease) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *AddResourceRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *Lease) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use AddResourceRequest.ProtoReflect.Descriptor instead. 
+func (*AddResourceRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescGZIP(), []int{7} } -func (m *Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintLeases(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintLeases(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintLeases(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1a - } - } - n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt):]) - if err1 != nil { - return 0, err1 +func (x *AddResourceRequest) GetID() string { + if x != nil { + return x.ID } - i -= n1 - i = encodeVarintLeases(dAtA, i, uint64(n1)) - i-- - dAtA[i] = 0x12 - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintLeases(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return "" } -func (m *CreateRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *AddResourceRequest) GetResource() *Resource { + if x != nil { + return x.Resource } - return dAtA[:n], nil + return nil } -func (m *CreateRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} +type DeleteResourceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *CreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintLeases(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintLeases(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintLeases(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1a - } - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintLeases(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Resource *Resource `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"` } -func (m *CreateResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *DeleteResourceRequest) Reset() { + *x = DeleteResourceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *CreateResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *DeleteResourceRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m 
*CreateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Lease != nil { - { - size, err := m.Lease.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLeases(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} +func (*DeleteResourceRequest) ProtoMessage() {} -func (m *DeleteRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *DeleteResourceRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *DeleteRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use DeleteResourceRequest.ProtoReflect.Descriptor instead. +func (*DeleteResourceRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescGZIP(), []int{8} } -func (m *DeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Sync { - i-- - if m.Sync { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 +func (x *DeleteResourceRequest) GetID() string { + if x != nil { + return x.ID } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintLeases(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return "" } -func (m *ListRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *DeleteResourceRequest) GetResource() *Resource { + if x != nil { + return x.Resource } - return dAtA[:n], nil + return nil } -func (m *ListRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} +type ListResourcesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *ListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Filters) > 0 { - for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Filters[iNdEx]) - copy(dAtA[i:], m.Filters[iNdEx]) - i = encodeVarintLeases(dAtA, i, uint64(len(m.Filters[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` } -func (m *ListResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *ListResourcesRequest) Reset() { + *x = ListResourcesRequest{} + if protoimpl.UnsafeEnabled { + mi := 
&file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *ListResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *ListResourcesRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Leases) > 0 { - for iNdEx := len(m.Leases) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Leases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLeases(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} +func (*ListResourcesRequest) ProtoMessage() {} -func (m *Resource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *ListResourcesRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *Resource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use ListResourcesRequest.ProtoReflect.Descriptor instead. +func (*ListResourcesRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescGZIP(), []int{9} } -func (m *Resource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) +func (x *ListResourcesRequest) GetID() string { + if x != nil { + return x.ID } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintLeases(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintLeases(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return "" } -func (m *AddResourceRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} +type ListResourcesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *AddResourceRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + Resources []*Resource `protobuf:"bytes,1,rep,name=resources,proto3" json:"resources,omitempty"` } -func (m *AddResourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLeases(dAtA, i, 
uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintLeases(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa +func (x *ListResourcesResponse) Reset() { + *x = ListResourcesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return len(dAtA) - i, nil } -func (m *DeleteResourceRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (x *ListResourcesResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DeleteResourceRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} +func (*ListResourcesResponse) ProtoMessage() {} -func (m *DeleteResourceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err +func (x *ListResourcesResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - i -= size - i = encodeVarintLeases(dAtA, i, uint64(size)) + return ms } - i-- - dAtA[i] = 0x12 - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintLeases(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ListResourcesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *ListResourcesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListResourcesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintLeases(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil +// Deprecated: Use ListResourcesResponse.ProtoReflect.Descriptor instead. 
+func (*ListResourcesResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescGZIP(), []int{10} } -func (m *ListResourcesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *ListResourcesResponse) GetResources() []*Resource { + if x != nil { + return x.Resources } - return dAtA[:n], nil + return nil } -func (m *ListResourcesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +var File_github_com_containerd_containerd_api_services_leases_v1_leases_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDesc = []byte{ + 0x0a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, + 0x65, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xd7, 0x01, 0x0a, 0x05, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x39, 0x0a, + 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x48, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6c, + 0x65, 0x61, 0x73, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x2e, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xac, 0x01, + 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, + 0x50, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x38, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 
0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4c, 0x0a, 0x0e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, + 0x0a, 0x05, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x65, + 0x61, 0x73, 0x65, 0x52, 0x05, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x22, 0x33, 0x0a, 0x0d, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x73, + 0x79, 0x6e, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x73, 0x79, 0x6e, 0x63, 0x22, + 0x27, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, + 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x22, 0x4c, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x06, 0x6c, 0x65, 0x61, 0x73, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6c, + 0x65, 0x61, 0x73, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x06, + 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, 0x22, 0x2e, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x69, 0x0a, 0x12, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x43, 0x0a, 0x08, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x22, 0x6c, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x43, 0x0a, 0x08, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x27, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, + 0x26, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x22, 0x5e, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x45, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x32, 0xd6, 0x04, 0x0a, 0x06, 0x4c, 0x65, 0x61, 0x73, + 0x65, 0x73, 0x12, 0x65, 0x0a, 0x06, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x2c, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x06, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x12, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x5f, 0x0a, 0x04, 0x4c, 0x69, 0x73, + 0x74, 0x12, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x0b, 0x41, 0x64, + 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, + 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x12, 0x5e, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, + 0x73, 
0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x34, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, + 0x73, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x12, 0x7a, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x33, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6c, 0x65, 0x61, 0x73, + 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2e, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2f, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x73, 0x2f, 0x76, 0x31, 0x3b, 0x6c, 0x65, 0x61, 0x73, + 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func (m *ListResourcesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Resources) > 0 { - for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLeases(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} +var ( + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescData = file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDesc +) -func encodeVarintLeases(dAtA []byte, offset int, v uint64) int { - offset -= sovLeases(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base +func file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes = make([]protoimpl.MessageInfo, 13) +var file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_goTypes = []interface{}{ + (*Lease)(nil), // 0: 
containerd.services.leases.v1.Lease + (*CreateRequest)(nil), // 1: containerd.services.leases.v1.CreateRequest + (*CreateResponse)(nil), // 2: containerd.services.leases.v1.CreateResponse + (*DeleteRequest)(nil), // 3: containerd.services.leases.v1.DeleteRequest + (*ListRequest)(nil), // 4: containerd.services.leases.v1.ListRequest + (*ListResponse)(nil), // 5: containerd.services.leases.v1.ListResponse + (*Resource)(nil), // 6: containerd.services.leases.v1.Resource + (*AddResourceRequest)(nil), // 7: containerd.services.leases.v1.AddResourceRequest + (*DeleteResourceRequest)(nil), // 8: containerd.services.leases.v1.DeleteResourceRequest + (*ListResourcesRequest)(nil), // 9: containerd.services.leases.v1.ListResourcesRequest + (*ListResourcesResponse)(nil), // 10: containerd.services.leases.v1.ListResourcesResponse + nil, // 11: containerd.services.leases.v1.Lease.LabelsEntry + nil, // 12: containerd.services.leases.v1.CreateRequest.LabelsEntry + (*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp + (*emptypb.Empty)(nil), // 14: google.protobuf.Empty +} +var file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_depIdxs = []int32{ + 13, // 0: containerd.services.leases.v1.Lease.created_at:type_name -> google.protobuf.Timestamp + 11, // 1: containerd.services.leases.v1.Lease.labels:type_name -> containerd.services.leases.v1.Lease.LabelsEntry + 12, // 2: containerd.services.leases.v1.CreateRequest.labels:type_name -> containerd.services.leases.v1.CreateRequest.LabelsEntry + 0, // 3: containerd.services.leases.v1.CreateResponse.lease:type_name -> containerd.services.leases.v1.Lease + 0, // 4: containerd.services.leases.v1.ListResponse.leases:type_name -> containerd.services.leases.v1.Lease + 6, // 5: containerd.services.leases.v1.AddResourceRequest.resource:type_name -> containerd.services.leases.v1.Resource + 6, // 6: containerd.services.leases.v1.DeleteResourceRequest.resource:type_name -> containerd.services.leases.v1.Resource + 6, // 7: containerd.services.leases.v1.ListResourcesResponse.resources:type_name -> containerd.services.leases.v1.Resource + 1, // 8: containerd.services.leases.v1.Leases.Create:input_type -> containerd.services.leases.v1.CreateRequest + 3, // 9: containerd.services.leases.v1.Leases.Delete:input_type -> containerd.services.leases.v1.DeleteRequest + 4, // 10: containerd.services.leases.v1.Leases.List:input_type -> containerd.services.leases.v1.ListRequest + 7, // 11: containerd.services.leases.v1.Leases.AddResource:input_type -> containerd.services.leases.v1.AddResourceRequest + 8, // 12: containerd.services.leases.v1.Leases.DeleteResource:input_type -> containerd.services.leases.v1.DeleteResourceRequest + 9, // 13: containerd.services.leases.v1.Leases.ListResources:input_type -> containerd.services.leases.v1.ListResourcesRequest + 2, // 14: containerd.services.leases.v1.Leases.Create:output_type -> containerd.services.leases.v1.CreateResponse + 14, // 15: containerd.services.leases.v1.Leases.Delete:output_type -> google.protobuf.Empty + 5, // 16: containerd.services.leases.v1.Leases.List:output_type -> containerd.services.leases.v1.ListResponse + 14, // 17: containerd.services.leases.v1.Leases.AddResource:output_type -> google.protobuf.Empty + 14, // 18: containerd.services.leases.v1.Leases.DeleteResource:output_type -> google.protobuf.Empty + 10, // 19: containerd.services.leases.v1.Leases.ListResources:output_type -> containerd.services.leases.v1.ListResourcesResponse + 14, // [14:20] is the sub-list for method output_type + 8, // 
[8:14] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_init() } +func file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_init() { + if File_github_com_containerd_containerd_api_services_leases_v1_leases_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Lease); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Resource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AddResourceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteResourceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListResourcesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListResourcesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDesc, + NumEnums: 0, + NumMessages: 13, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_services_leases_v1_leases_proto = out.File + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_rawDesc = nil + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_goTypes = nil + file_github_com_containerd_containerd_api_services_leases_v1_leases_proto_depIdxs = nil } -func (m *Lease) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovLeases(uint64(l)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt) - n += 1 + l + sovLeases(uint64(l)) - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovLeases(uint64(len(k))) + 1 + len(v) + sovLeases(uint64(len(v))) - n += mapEntrySize + 1 + sovLeases(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CreateRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovLeases(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovLeases(uint64(len(k))) + 1 + len(v) + sovLeases(uint64(len(v))) - n += mapEntrySize + 1 + sovLeases(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CreateResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Lease != nil { - l = m.Lease.Size() - n += 1 + l + sovLeases(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DeleteRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovLeases(uint64(l)) - } - if m.Sync { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Filters) > 0 { - for _, s := range m.Filters { - l = len(s) - n += 1 + l + sovLeases(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Leases) > 0 { - for _, e := range m.Leases { - l = e.Size() - n += 1 + l + sovLeases(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Resource) Size() (n int) { - 
if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovLeases(uint64(l)) - } - l = len(m.Type) - if l > 0 { - n += 1 + l + sovLeases(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AddResourceRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovLeases(uint64(l)) - } - l = m.Resource.Size() - n += 1 + l + sovLeases(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DeleteResourceRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovLeases(uint64(l)) - } - l = m.Resource.Size() - n += 1 + l + sovLeases(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListResourcesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovLeases(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListResourcesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Resources) > 0 { - for _, e := range m.Resources { - l = e.Size() - n += 1 + l + sovLeases(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovLeases(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozLeases(x uint64) (n int) { - return sovLeases(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Lease) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&Lease{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `CreatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CreatedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CreateRequest) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&CreateRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CreateResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CreateResponse{`, - `Lease:` + strings.Replace(this.Lease.String(), "Lease", "Lease", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *DeleteRequest) String() string { - if this == nil { - return "nil" - } - s := 
strings.Join([]string{`&DeleteRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Sync:` + fmt.Sprintf("%v", this.Sync) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ListRequest{`, - `Filters:` + fmt.Sprintf("%v", this.Filters) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForLeases := "[]*Lease{" - for _, f := range this.Leases { - repeatedStringForLeases += strings.Replace(f.String(), "Lease", "Lease", 1) + "," - } - repeatedStringForLeases += "}" - s := strings.Join([]string{`&ListResponse{`, - `Leases:` + repeatedStringForLeases + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *Resource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Resource{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *AddResourceRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AddResourceRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Resource:` + strings.Replace(strings.Replace(this.Resource.String(), "Resource", "Resource", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *DeleteResourceRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeleteResourceRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Resource:` + strings.Replace(strings.Replace(this.Resource.String(), "Resource", "Resource", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListResourcesRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ListResourcesRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListResourcesResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForResources := "[]Resource{" - for _, f := range this.Resources { - repeatedStringForResources += strings.Replace(strings.Replace(f.String(), "Resource", "Resource", 1), `&`, ``, 1) + "," - } - repeatedStringForResources += "}" - s := strings.Join([]string{`&ListResourcesResponse{`, - `Resources:` + repeatedStringForResources + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringLeases(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Lease) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire 
>> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Lease: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Lease: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthLeases - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthLeases - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] 
- iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthLeases - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthLeases - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipLeases(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLeases - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLeases(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLeases - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CreateRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CreateRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CreateRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 
0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthLeases - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthLeases - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthLeases - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthLeases - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipLeases(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLeases - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLeases(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLeases - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CreateResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CreateResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CreateResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Lease == nil { - m.Lease = &Lease{} - } - if err := m.Lease.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLeases(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLeases - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Sync", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Sync = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipLeases(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLeases - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLeases(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLeases - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Leases = append(m.Leases, &Lease{}) - if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLeases(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLeases - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Resource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Resource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Resource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLeases(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLeases - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AddResourceRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AddResourceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AddResourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLeases(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLeases - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteResourceRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteResourceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteResourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLeases(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLeases - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListResourcesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListResourcesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListResourcesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLeases(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLeases - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListResourcesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListResourcesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListResourcesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLeases - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLeases - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLeases - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resources = append(m.Resources, Resource{}) - if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLeases(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLeases - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipLeases(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLeases - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLeases - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLeases - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthLeases - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupLeases - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthLeases - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthLeases = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowLeases = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupLeases = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto index 6aa61faedf778..8551fcea7f64f 100644 --- a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto +++ b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases.proto @@ -17,7 +17,6 @@ syntax = "proto3"; package containerd.services.leases.v1; -import weak "gogoproto/gogo.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/timestamp.proto"; @@ -52,7 +51,7 @@ service Leases { message Lease { string id = 1; - google.protobuf.Timestamp created_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp created_at = 2; map labels = 3; } @@ -99,13 +98,13 @@ message Resource { message AddResourceRequest { string id = 1; - Resource resource = 2 [(gogoproto.nullable) = false]; + Resource resource = 2; } message DeleteResourceRequest { string id = 1; - Resource resource = 2 [(gogoproto.nullable) = false]; + Resource resource = 2; } message ListResourcesRequest { @@ -113,5 +112,5 @@ message ListResourcesRequest { } message ListResourcesResponse { - repeated Resource resources = 1 [(gogoproto.nullable) = false]; + repeated Resource resources = 1 ; } diff --git a/vendor/github.com/containerd/containerd/api/services/leases/v1/leases_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases_grpc.pb.go new file mode 100644 index 0000000000000..1ecf91ecf10ae --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/leases/v1/leases_grpc.pb.go @@ -0,0 +1,306 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/leases/v1/leases.proto + +package leases + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// LeasesClient is the client API for Leases service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type LeasesClient interface { + // Create creates a new lease for managing changes to metadata. A lease + // can be used to protect objects from being removed. + Create(ctx context.Context, in *CreateRequest, opts ...grpc.CallOption) (*CreateResponse, error) + // Delete deletes the lease and makes any unreferenced objects created + // during the lease eligible for garbage collection if not referenced + // or retained by other resources during the lease. + Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // List lists all active leases, returning the full list of + // leases and optionally including the referenced resources. + List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error) + // AddResource references the resource by the provided lease. + AddResource(ctx context.Context, in *AddResourceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // DeleteResource dereferences the resource by the provided lease. + DeleteResource(ctx context.Context, in *DeleteResourceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // ListResources lists all the resources referenced by the lease. + ListResources(ctx context.Context, in *ListResourcesRequest, opts ...grpc.CallOption) (*ListResourcesResponse, error) +} + +type leasesClient struct { + cc grpc.ClientConnInterface +} + +func NewLeasesClient(cc grpc.ClientConnInterface) LeasesClient { + return &leasesClient{cc} +} + +func (c *leasesClient) Create(ctx context.Context, in *CreateRequest, opts ...grpc.CallOption) (*CreateResponse, error) { + out := new(CreateResponse) + err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *leasesClient) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *leasesClient) List(ctx context.Context, in *ListRequest, opts ...grpc.CallOption) (*ListResponse, error) { + out := new(ListResponse) + err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *leasesClient) AddResource(ctx context.Context, in *AddResourceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/AddResource", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *leasesClient) DeleteResource(ctx context.Context, in *DeleteResourceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/DeleteResource", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *leasesClient) ListResources(ctx context.Context, in *ListResourcesRequest, opts ...grpc.CallOption) (*ListResourcesResponse, error) { + out := new(ListResourcesResponse) + err := c.cc.Invoke(ctx, "/containerd.services.leases.v1.Leases/ListResources", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// LeasesServer is the server API for Leases service. +// All implementations must embed UnimplementedLeasesServer +// for forward compatibility +type LeasesServer interface { + // Create creates a new lease for managing changes to metadata. A lease + // can be used to protect objects from being removed. + Create(context.Context, *CreateRequest) (*CreateResponse, error) + // Delete deletes the lease and makes any unreferenced objects created + // during the lease eligible for garbage collection if not referenced + // or retained by other resources during the lease. + Delete(context.Context, *DeleteRequest) (*emptypb.Empty, error) + // List lists all active leases, returning the full list of + // leases and optionally including the referenced resources. + List(context.Context, *ListRequest) (*ListResponse, error) + // AddResource references the resource by the provided lease. + AddResource(context.Context, *AddResourceRequest) (*emptypb.Empty, error) + // DeleteResource dereferences the resource by the provided lease. + DeleteResource(context.Context, *DeleteResourceRequest) (*emptypb.Empty, error) + // ListResources lists all the resources referenced by the lease. + ListResources(context.Context, *ListResourcesRequest) (*ListResourcesResponse, error) + mustEmbedUnimplementedLeasesServer() +} + +// UnimplementedLeasesServer must be embedded to have forward compatible implementations. +type UnimplementedLeasesServer struct { +} + +func (UnimplementedLeasesServer) Create(context.Context, *CreateRequest) (*CreateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") +} +func (UnimplementedLeasesServer) Delete(context.Context, *DeleteRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (UnimplementedLeasesServer) List(context.Context, *ListRequest) (*ListResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method List not implemented") +} +func (UnimplementedLeasesServer) AddResource(context.Context, *AddResourceRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method AddResource not implemented") +} +func (UnimplementedLeasesServer) DeleteResource(context.Context, *DeleteResourceRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteResource not implemented") +} +func (UnimplementedLeasesServer) ListResources(context.Context, *ListResourcesRequest) (*ListResourcesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListResources not implemented") +} +func (UnimplementedLeasesServer) mustEmbedUnimplementedLeasesServer() {} + +// UnsafeLeasesServer may be embedded to opt out of forward compatibility for this service. 
+// Use of this interface is not recommended, as added methods to LeasesServer will +// result in compilation errors. +type UnsafeLeasesServer interface { + mustEmbedUnimplementedLeasesServer() +} + +func RegisterLeasesServer(s grpc.ServiceRegistrar, srv LeasesServer) { + s.RegisterService(&Leases_ServiceDesc, srv) +} + +func _Leases_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeasesServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.leases.v1.Leases/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeasesServer).Create(ctx, req.(*CreateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Leases_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeasesServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.leases.v1.Leases/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeasesServer).Delete(ctx, req.(*DeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Leases_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeasesServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.leases.v1.Leases/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeasesServer).List(ctx, req.(*ListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Leases_AddResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddResourceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeasesServer).AddResource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.leases.v1.Leases/AddResource", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeasesServer).AddResource(ctx, req.(*AddResourceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Leases_DeleteResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteResourceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeasesServer).DeleteResource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.leases.v1.Leases/DeleteResource", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeasesServer).DeleteResource(ctx, req.(*DeleteResourceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_Leases_ListResources_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListResourcesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LeasesServer).ListResources(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.leases.v1.Leases/ListResources", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LeasesServer).ListResources(ctx, req.(*ListResourcesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Leases_ServiceDesc is the grpc.ServiceDesc for Leases service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Leases_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.leases.v1.Leases", + HandlerType: (*LeasesServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Create", + Handler: _Leases_Create_Handler, + }, + { + MethodName: "Delete", + Handler: _Leases_Delete_Handler, + }, + { + MethodName: "List", + Handler: _Leases_List_Handler, + }, + { + MethodName: "AddResource", + Handler: _Leases_AddResource_Handler, + }, + { + MethodName: "DeleteResource", + Handler: _Leases_DeleteResource_Handler, + }, + { + MethodName: "ListResources", + Handler: _Leases_ListResources_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/containerd/containerd/api/services/leases/v1/leases.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go index 76f9e117266e1..a75a315c43c46 100644 --- a/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go @@ -1,36 +1,47 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto package namespaces import ( - context "context" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - types "github.com/gogo/protobuf/types" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" reflect "reflect" - strings "strings" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type Namespace struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Labels provides an area to include arbitrary data on namespaces. // @@ -38,2481 +49,819 @@ type Namespace struct { // // Note that to add a new value to this field, read the existing set and // include the entire result in the update call. 
- Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *Namespace) Reset() { *m = Namespace{} } -func (*Namespace) ProtoMessage() {} -func (*Namespace) Descriptor() ([]byte, []int) { - return fileDescriptor_8c41761eaeea4fd3, []int{0} -} -func (m *Namespace) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Namespace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Namespace.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *Namespace) Reset() { + *x = Namespace{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *Namespace) XXX_Merge(src proto.Message) { - xxx_messageInfo_Namespace.Merge(m, src) -} -func (m *Namespace) XXX_Size() int { - return m.Size() -} -func (m *Namespace) XXX_DiscardUnknown() { - xxx_messageInfo_Namespace.DiscardUnknown(m) -} - -var xxx_messageInfo_Namespace proto.InternalMessageInfo - -type GetNamespaceRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} -func (m *GetNamespaceRequest) Reset() { *m = GetNamespaceRequest{} } -func (*GetNamespaceRequest) ProtoMessage() {} -func (*GetNamespaceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_8c41761eaeea4fd3, []int{1} -} -func (m *GetNamespaceRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetNamespaceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetNamespaceRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GetNamespaceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetNamespaceRequest.Merge(m, src) -} -func (m *GetNamespaceRequest) XXX_Size() int { - return m.Size() -} -func (m *GetNamespaceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetNamespaceRequest.DiscardUnknown(m) +func (x *Namespace) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_GetNamespaceRequest proto.InternalMessageInfo - -type GetNamespaceResponse struct { - Namespace Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} +func (*Namespace) ProtoMessage() {} -func (m *GetNamespaceResponse) Reset() { *m = GetNamespaceResponse{} } -func (*GetNamespaceResponse) ProtoMessage() {} -func (*GetNamespaceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_8c41761eaeea4fd3, []int{2} -} -func (m *GetNamespaceResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m 
*GetNamespaceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetNamespaceResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (x *Namespace) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *GetNamespaceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetNamespaceResponse.Merge(m, src) -} -func (m *GetNamespaceResponse) XXX_Size() int { - return m.Size() -} -func (m *GetNamespaceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetNamespaceResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetNamespaceResponse proto.InternalMessageInfo -type ListNamespacesRequest struct { - Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +// Deprecated: Use Namespace.ProtoReflect.Descriptor instead. +func (*Namespace) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescGZIP(), []int{0} } -func (m *ListNamespacesRequest) Reset() { *m = ListNamespacesRequest{} } -func (*ListNamespacesRequest) ProtoMessage() {} -func (*ListNamespacesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_8c41761eaeea4fd3, []int{3} -} -func (m *ListNamespacesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListNamespacesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListNamespacesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *Namespace) GetName() string { + if x != nil { + return x.Name } -} -func (m *ListNamespacesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListNamespacesRequest.Merge(m, src) -} -func (m *ListNamespacesRequest) XXX_Size() int { - return m.Size() -} -func (m *ListNamespacesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListNamespacesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListNamespacesRequest proto.InternalMessageInfo - -type ListNamespacesResponse struct { - Namespaces []Namespace `protobuf:"bytes,1,rep,name=namespaces,proto3" json:"namespaces"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + return "" } -func (m *ListNamespacesResponse) Reset() { *m = ListNamespacesResponse{} } -func (*ListNamespacesResponse) ProtoMessage() {} -func (*ListNamespacesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_8c41761eaeea4fd3, []int{4} -} -func (m *ListNamespacesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListNamespacesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListNamespacesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *Namespace) GetLabels() 
map[string]string { + if x != nil { + return x.Labels } -} -func (m *ListNamespacesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListNamespacesResponse.Merge(m, src) -} -func (m *ListNamespacesResponse) XXX_Size() int { - return m.Size() -} -func (m *ListNamespacesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListNamespacesResponse.DiscardUnknown(m) + return nil } -var xxx_messageInfo_ListNamespacesResponse proto.InternalMessageInfo +type GetNamespaceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -type CreateNamespaceRequest struct { - Namespace Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } -func (m *CreateNamespaceRequest) Reset() { *m = CreateNamespaceRequest{} } -func (*CreateNamespaceRequest) ProtoMessage() {} -func (*CreateNamespaceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_8c41761eaeea4fd3, []int{5} -} -func (m *CreateNamespaceRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CreateNamespaceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CreateNamespaceRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *GetNamespaceRequest) Reset() { + *x = GetNamespaceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *CreateNamespaceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateNamespaceRequest.Merge(m, src) -} -func (m *CreateNamespaceRequest) XXX_Size() int { - return m.Size() -} -func (m *CreateNamespaceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CreateNamespaceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CreateNamespaceRequest proto.InternalMessageInfo - -type CreateNamespaceResponse struct { - Namespace Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} -func (m *CreateNamespaceResponse) Reset() { *m = CreateNamespaceResponse{} } -func (*CreateNamespaceResponse) ProtoMessage() {} -func (*CreateNamespaceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_8c41761eaeea4fd3, []int{6} -} -func (m *CreateNamespaceResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CreateNamespaceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CreateNamespaceResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CreateNamespaceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateNamespaceResponse.Merge(m, src) -} -func (m *CreateNamespaceResponse) XXX_Size() int { - return m.Size() -} -func (m *CreateNamespaceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CreateNamespaceResponse.DiscardUnknown(m) +func (x *GetNamespaceRequest) String() string { + return 
protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_CreateNamespaceResponse proto.InternalMessageInfo - -// UpdateNamespaceRequest updates the metadata for a namespace. -// -// The operation should follow semantics described in -// https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-mask, -// unless otherwise qualified. -type UpdateNamespaceRequest struct { - // Namespace provides the target value, as declared by the mask, for the update. - // - // The namespace field must be set. - Namespace Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace"` - // UpdateMask specifies which fields to perform the update on. If empty, - // the operation applies to all fields. - // - // For the most part, this applies only to selectively updating labels on - // the namespace. While field masks are typically limited to ascii alphas - // and digits, we just take everything after the "labels." as the map key. - UpdateMask *types.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} +func (*GetNamespaceRequest) ProtoMessage() {} -func (m *UpdateNamespaceRequest) Reset() { *m = UpdateNamespaceRequest{} } -func (*UpdateNamespaceRequest) ProtoMessage() {} -func (*UpdateNamespaceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_8c41761eaeea4fd3, []int{7} -} -func (m *UpdateNamespaceRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UpdateNamespaceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UpdateNamespaceRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (x *GetNamespaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *UpdateNamespaceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateNamespaceRequest.Merge(m, src) -} -func (m *UpdateNamespaceRequest) XXX_Size() int { - return m.Size() -} -func (m *UpdateNamespaceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateNamespaceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_UpdateNamespaceRequest proto.InternalMessageInfo -type UpdateNamespaceResponse struct { - Namespace Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +// Deprecated: Use GetNamespaceRequest.ProtoReflect.Descriptor instead. 
+func (*GetNamespaceRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescGZIP(), []int{1} } -func (m *UpdateNamespaceResponse) Reset() { *m = UpdateNamespaceResponse{} } -func (*UpdateNamespaceResponse) ProtoMessage() {} -func (*UpdateNamespaceResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_8c41761eaeea4fd3, []int{8} -} -func (m *UpdateNamespaceResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UpdateNamespaceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UpdateNamespaceResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *GetNamespaceRequest) GetName() string { + if x != nil { + return x.Name } -} -func (m *UpdateNamespaceResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateNamespaceResponse.Merge(m, src) -} -func (m *UpdateNamespaceResponse) XXX_Size() int { - return m.Size() -} -func (m *UpdateNamespaceResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateNamespaceResponse.DiscardUnknown(m) + return "" } -var xxx_messageInfo_UpdateNamespaceResponse proto.InternalMessageInfo +type GetNamespaceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -type DeleteNamespaceRequest struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Namespace *Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` } -func (m *DeleteNamespaceRequest) Reset() { *m = DeleteNamespaceRequest{} } -func (*DeleteNamespaceRequest) ProtoMessage() {} -func (*DeleteNamespaceRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_8c41761eaeea4fd3, []int{9} -} -func (m *DeleteNamespaceRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteNamespaceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteNamespaceRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *GetNamespaceResponse) Reset() { + *x = GetNamespaceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *DeleteNamespaceRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteNamespaceRequest.Merge(m, src) -} -func (m *DeleteNamespaceRequest) XXX_Size() int { - return m.Size() -} -func (m *DeleteNamespaceRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteNamespaceRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteNamespaceRequest proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Namespace)(nil), "containerd.services.namespaces.v1.Namespace") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.namespaces.v1.Namespace.LabelsEntry") - proto.RegisterType((*GetNamespaceRequest)(nil), "containerd.services.namespaces.v1.GetNamespaceRequest") - proto.RegisterType((*GetNamespaceResponse)(nil), "containerd.services.namespaces.v1.GetNamespaceResponse") - 
proto.RegisterType((*ListNamespacesRequest)(nil), "containerd.services.namespaces.v1.ListNamespacesRequest") - proto.RegisterType((*ListNamespacesResponse)(nil), "containerd.services.namespaces.v1.ListNamespacesResponse") - proto.RegisterType((*CreateNamespaceRequest)(nil), "containerd.services.namespaces.v1.CreateNamespaceRequest") - proto.RegisterType((*CreateNamespaceResponse)(nil), "containerd.services.namespaces.v1.CreateNamespaceResponse") - proto.RegisterType((*UpdateNamespaceRequest)(nil), "containerd.services.namespaces.v1.UpdateNamespaceRequest") - proto.RegisterType((*UpdateNamespaceResponse)(nil), "containerd.services.namespaces.v1.UpdateNamespaceResponse") - proto.RegisterType((*DeleteNamespaceRequest)(nil), "containerd.services.namespaces.v1.DeleteNamespaceRequest") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto", fileDescriptor_8c41761eaeea4fd3) -} - -var fileDescriptor_8c41761eaeea4fd3 = []byte{ - // 551 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x6e, 0xd3, 0x4c, - 0x14, 0xcd, 0x24, 0xf9, 0x2c, 0xe5, 0x7a, 0xf3, 0x69, 0x08, 0x26, 0x32, 0x92, 0x09, 0x5e, 0x15, - 0xa9, 0x1a, 0xab, 0x41, 0x82, 0xfe, 0xec, 0x0a, 0x6d, 0x17, 0x14, 0x84, 0x2c, 0x21, 0x21, 0x58, - 0x80, 0x93, 0x4c, 0x5c, 0x13, 0xc7, 0x36, 0x9e, 0xb1, 0xa5, 0x88, 0x05, 0xbc, 0x0d, 0x1b, 0x1e, - 0x24, 0x4b, 0x96, 0xac, 0x50, 0x9b, 0x27, 0x41, 0x33, 0x76, 0xe2, 0xd0, 0x18, 0xe1, 0x06, 0xca, - 0xee, 0x5e, 0x7b, 0xce, 0x3d, 0x67, 0xae, 0xce, 0xb1, 0xe1, 0x89, 0xeb, 0xf1, 0xb3, 0xa4, 0x4f, - 0x06, 0xe1, 0xc4, 0x1a, 0x84, 0x01, 0x77, 0xbc, 0x80, 0xc6, 0xc3, 0xd5, 0xd2, 0x89, 0x3c, 0x8b, - 0xd1, 0x38, 0xf5, 0x06, 0x94, 0x59, 0x81, 0x33, 0xa1, 0x2c, 0x72, 0x44, 0x99, 0xee, 0x14, 0x1d, - 0x89, 0xe2, 0x90, 0x87, 0xf8, 0x6e, 0x01, 0x23, 0x0b, 0x08, 0x29, 0x20, 0x24, 0xdd, 0xd1, 0xdb, - 0x6e, 0xe8, 0x86, 0xf2, 0xb4, 0x25, 0xaa, 0x0c, 0xa8, 0xdf, 0x76, 0xc3, 0xd0, 0xf5, 0xa9, 0x25, - 0xbb, 0x7e, 0x32, 0xb2, 0xe8, 0x24, 0xe2, 0xd3, 0xfc, 0x65, 0xf7, 0xf2, 0xcb, 0x91, 0x47, 0xfd, - 0xe1, 0x9b, 0x89, 0xc3, 0xc6, 0xd9, 0x09, 0xf3, 0x0b, 0x82, 0xd6, 0xb3, 0x05, 0x0d, 0xc6, 0xd0, - 0x14, 0x9c, 0x1d, 0xd4, 0x45, 0x5b, 0x2d, 0x5b, 0xd6, 0xf8, 0x39, 0x28, 0xbe, 0xd3, 0xa7, 0x3e, - 0xeb, 0xd4, 0xbb, 0x8d, 0x2d, 0xb5, 0xb7, 0x4b, 0x7e, 0x2b, 0x95, 0x2c, 0x27, 0x92, 0x53, 0x09, - 0x3d, 0x0a, 0x78, 0x3c, 0xb5, 0xf3, 0x39, 0xfa, 0x1e, 0xa8, 0x2b, 0x8f, 0xf1, 0xff, 0xd0, 0x18, - 0xd3, 0x69, 0xce, 0x29, 0x4a, 0xdc, 0x86, 0xff, 0x52, 0xc7, 0x4f, 0x68, 0xa7, 0x2e, 0x9f, 0x65, - 0xcd, 0x7e, 0x7d, 0x17, 0x99, 0xf7, 0xe0, 0xc6, 0x09, 0xe5, 0xcb, 0xf1, 0x36, 0x7d, 0x9f, 0x50, - 0xc6, 0xcb, 0x74, 0x9b, 0x67, 0xd0, 0xfe, 0xf9, 0x28, 0x8b, 0xc2, 0x80, 0x89, 0xfb, 0xb4, 0x96, - 0x62, 0x25, 0x40, 0xed, 0x6d, 0x5f, 0xe5, 0x4a, 0x87, 0xcd, 0xd9, 0xf7, 0x3b, 0x35, 0xbb, 0x18, - 0x62, 0x5a, 0x70, 0xf3, 0xd4, 0x63, 0x05, 0x15, 0x5b, 0xc8, 0xd2, 0x40, 0x19, 0x79, 0x3e, 0xa7, - 0x71, 0x2e, 0x2c, 0xef, 0x4c, 0x1f, 0xb4, 0xcb, 0x80, 0x5c, 0x9c, 0x0d, 0x50, 0xd0, 0x76, 0x90, - 0x5c, 0xf8, 0x26, 0xea, 0x56, 0xa6, 0x98, 0xef, 0x40, 0x7b, 0x14, 0x53, 0x87, 0xd3, 0xb5, 0xb5, - 0xfd, 0xfd, 0x55, 0x8c, 0xe1, 0xd6, 0x1a, 0xd7, 0xb5, 0xed, 0xfd, 0x33, 0x02, 0xed, 0x45, 0x34, - 0xfc, 0x27, 0x37, 0xc3, 0x07, 0xa0, 0x26, 0x92, 0x4b, 0xa6, 0x47, 0x3a, 0x53, 0xed, 0xe9, 0x24, - 0x0b, 0x18, 0x59, 0x04, 0x8c, 0x1c, 0x8b, 0x80, 0x3d, 0x75, 0xd8, 0xd8, 0x86, 0xec, 0xb8, 0xa8, - 0xc5, 0x5a, 0xd6, 0x84, 0x5e, 0xdb, 0x5a, 0xb6, 0x41, 
0x7b, 0x4c, 0x7d, 0x5a, 0xb2, 0x95, 0x92, - 0x98, 0xf4, 0xce, 0x9b, 0x00, 0x85, 0x11, 0x71, 0x0a, 0x8d, 0x13, 0xca, 0xf1, 0x83, 0x0a, 0x12, - 0x4a, 0x82, 0xa8, 0x3f, 0xbc, 0x32, 0x2e, 0x5f, 0xc3, 0x07, 0x68, 0x8a, 0x48, 0xe0, 0x2a, 0x5f, - 0x97, 0xd2, 0xb0, 0xe9, 0x7b, 0x1b, 0x20, 0x73, 0xf2, 0x8f, 0xa0, 0x64, 0xae, 0xc5, 0x55, 0x86, - 0x94, 0x87, 0x49, 0xdf, 0xdf, 0x04, 0x5a, 0x08, 0xc8, 0xfc, 0x51, 0x49, 0x40, 0xb9, 0xe7, 0x2b, - 0x09, 0xf8, 0x95, 0x0b, 0x5f, 0x83, 0x92, 0x79, 0xa6, 0x92, 0x80, 0x72, 0x7b, 0xe9, 0xda, 0x5a, - 0x1a, 0x8e, 0xc4, 0xbf, 0xe8, 0xf0, 0xed, 0xec, 0xc2, 0xa8, 0x7d, 0xbb, 0x30, 0x6a, 0x9f, 0xe6, - 0x06, 0x9a, 0xcd, 0x0d, 0xf4, 0x75, 0x6e, 0xa0, 0xf3, 0xb9, 0x81, 0x5e, 0x1d, 0xff, 0xc1, 0x2f, - 0xf4, 0xa0, 0xe8, 0x5e, 0xd6, 0xfa, 0x8a, 0xe4, 0xbc, 0xff, 0x23, 0x00, 0x00, 0xff, 0xff, 0x4f, - 0x4a, 0x87, 0xf3, 0x95, 0x07, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// NamespacesClient is the client API for Namespaces service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type NamespacesClient interface { - Get(ctx context.Context, in *GetNamespaceRequest, opts ...grpc.CallOption) (*GetNamespaceResponse, error) - List(ctx context.Context, in *ListNamespacesRequest, opts ...grpc.CallOption) (*ListNamespacesResponse, error) - Create(ctx context.Context, in *CreateNamespaceRequest, opts ...grpc.CallOption) (*CreateNamespaceResponse, error) - Update(ctx context.Context, in *UpdateNamespaceRequest, opts ...grpc.CallOption) (*UpdateNamespaceResponse, error) - Delete(ctx context.Context, in *DeleteNamespaceRequest, opts ...grpc.CallOption) (*types.Empty, error) -} - -type namespacesClient struct { - cc *grpc.ClientConn -} - -func NewNamespacesClient(cc *grpc.ClientConn) NamespacesClient { - return &namespacesClient{cc} -} -func (c *namespacesClient) Get(ctx context.Context, in *GetNamespaceRequest, opts ...grpc.CallOption) (*GetNamespaceResponse, error) { - out := new(GetNamespaceResponse) - err := c.cc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Get", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +func (x *GetNamespaceResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (c *namespacesClient) List(ctx context.Context, in *ListNamespacesRequest, opts ...grpc.CallOption) (*ListNamespacesResponse, error) { - out := new(ListNamespacesResponse) - err := c.cc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/List", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} +func (*GetNamespaceResponse) ProtoMessage() {} -func (c *namespacesClient) Create(ctx context.Context, in *CreateNamespaceRequest, opts ...grpc.CallOption) (*CreateNamespaceResponse, error) { - out := new(CreateNamespaceResponse) - err := c.cc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Create", in, out, opts...) 
- if err != nil { - return nil, err +func (x *GetNamespaceResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return out, nil + return mi.MessageOf(x) } -func (c *namespacesClient) Update(ctx context.Context, in *UpdateNamespaceRequest, opts ...grpc.CallOption) (*UpdateNamespaceResponse, error) { - out := new(UpdateNamespaceResponse) - err := c.cc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Update", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +// Deprecated: Use GetNamespaceResponse.ProtoReflect.Descriptor instead. +func (*GetNamespaceResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescGZIP(), []int{2} } -func (c *namespacesClient) Delete(ctx context.Context, in *DeleteNamespaceRequest, opts ...grpc.CallOption) (*types.Empty, error) { - out := new(types.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Delete", in, out, opts...) - if err != nil { - return nil, err +func (x *GetNamespaceResponse) GetNamespace() *Namespace { + if x != nil { + return x.Namespace } - return out, nil -} - -// NamespacesServer is the server API for Namespaces service. -type NamespacesServer interface { - Get(context.Context, *GetNamespaceRequest) (*GetNamespaceResponse, error) - List(context.Context, *ListNamespacesRequest) (*ListNamespacesResponse, error) - Create(context.Context, *CreateNamespaceRequest) (*CreateNamespaceResponse, error) - Update(context.Context, *UpdateNamespaceRequest) (*UpdateNamespaceResponse, error) - Delete(context.Context, *DeleteNamespaceRequest) (*types.Empty, error) -} - -// UnimplementedNamespacesServer can be embedded to have forward compatible implementations. 
-type UnimplementedNamespacesServer struct { -} - -func (*UnimplementedNamespacesServer) Get(ctx context.Context, req *GetNamespaceRequest) (*GetNamespaceResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") -} -func (*UnimplementedNamespacesServer) List(ctx context.Context, req *ListNamespacesRequest) (*ListNamespacesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method List not implemented") -} -func (*UnimplementedNamespacesServer) Create(ctx context.Context, req *CreateNamespaceRequest) (*CreateNamespaceResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") -} -func (*UnimplementedNamespacesServer) Update(ctx context.Context, req *UpdateNamespaceRequest) (*UpdateNamespaceResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") -} -func (*UnimplementedNamespacesServer) Delete(ctx context.Context, req *DeleteNamespaceRequest) (*types.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") + return nil } -func RegisterNamespacesServer(s *grpc.Server, srv NamespacesServer) { - s.RegisterService(&_Namespaces_serviceDesc, srv) -} +type ListNamespacesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func _Namespaces_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetNamespaceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NamespacesServer).Get(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.namespaces.v1.Namespaces/Get", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NamespacesServer).Get(ctx, req.(*GetNamespaceRequest)) - } - return interceptor(ctx, in, info, handler) + Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` } -func _Namespaces_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListNamespacesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NamespacesServer).List(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.namespaces.v1.Namespaces/List", +func (x *ListNamespacesRequest) Reset() { + *x = ListNamespacesRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NamespacesServer).List(ctx, req.(*ListNamespacesRequest)) - } - return interceptor(ctx, in, info, handler) } -func _Namespaces_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateNamespaceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NamespacesServer).Create(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.namespaces.v1.Namespaces/Create", - } - handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { - return srv.(NamespacesServer).Create(ctx, req.(*CreateNamespaceRequest)) - } - return interceptor(ctx, in, info, handler) +func (x *ListNamespacesRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func _Namespaces_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateNamespaceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NamespacesServer).Update(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.namespaces.v1.Namespaces/Update", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NamespacesServer).Update(ctx, req.(*UpdateNamespaceRequest)) - } - return interceptor(ctx, in, info, handler) -} +func (*ListNamespacesRequest) ProtoMessage() {} -func _Namespaces_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteNamespaceRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NamespacesServer).Delete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.namespaces.v1.Namespaces/Delete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NamespacesServer).Delete(ctx, req.(*DeleteNamespaceRequest)) +func (x *ListNamespacesRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return interceptor(ctx, in, info, handler) + return mi.MessageOf(x) } -var _Namespaces_serviceDesc = grpc.ServiceDesc{ - ServiceName: "containerd.services.namespaces.v1.Namespaces", - HandlerType: (*NamespacesServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Get", - Handler: _Namespaces_Get_Handler, - }, - { - MethodName: "List", - Handler: _Namespaces_List_Handler, - }, - { - MethodName: "Create", - Handler: _Namespaces_Create_Handler, - }, - { - MethodName: "Update", - Handler: _Namespaces_Update_Handler, - }, - { - MethodName: "Delete", - Handler: _Namespaces_Delete_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto", +// Deprecated: Use ListNamespacesRequest.ProtoReflect.Descriptor instead. 
+func (*ListNamespacesRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescGZIP(), []int{3} } -func (m *Namespace) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *ListNamespacesRequest) GetFilter() string { + if x != nil { + return x.Filter } - return dAtA[:n], nil + return "" } -func (m *Namespace) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} +type ListNamespacesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Namespace) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintNamespace(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintNamespace(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintNamespace(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + Namespaces []*Namespace `protobuf:"bytes,1,rep,name=namespaces,proto3" json:"namespaces,omitempty"` } -func (m *GetNamespaceRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *ListNamespacesResponse) Reset() { + *x = ListNamespacesResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return dAtA[:n], nil } -func (m *GetNamespaceRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *ListNamespacesResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetNamespaceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} +func (*ListNamespacesResponse) ProtoMessage() {} -func (m *GetNamespaceResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *ListNamespacesResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *GetNamespaceResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - 
return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use ListNamespacesResponse.ProtoReflect.Descriptor instead. +func (*ListNamespacesResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescGZIP(), []int{4} } -func (m *GetNamespaceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Namespace.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintNamespace(dAtA, i, uint64(size)) +func (x *ListNamespacesResponse) GetNamespaces() []*Namespace { + if x != nil { + return x.Namespaces } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return nil } -func (m *ListNamespacesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} +type CreateNamespaceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *ListNamespacesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + Namespace *Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` } -func (m *ListNamespacesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Filter) > 0 { - i -= len(m.Filter) - copy(dAtA[i:], m.Filter) - i = encodeVarintNamespace(dAtA, i, uint64(len(m.Filter))) - i-- - dAtA[i] = 0xa +func (x *CreateNamespaceRequest) Reset() { + *x = CreateNamespaceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return len(dAtA) - i, nil } -func (m *ListNamespacesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (x *CreateNamespaceRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ListNamespacesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} +func (*CreateNamespaceRequest) ProtoMessage() {} -func (m *ListNamespacesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Namespaces) > 0 { - for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Namespaces[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintNamespace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa +func (x *CreateNamespaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + 
return ms } - return len(dAtA) - i, nil -} - -func (m *CreateNamespaceRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *CreateNamespaceRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use CreateNamespaceRequest.ProtoReflect.Descriptor instead. +func (*CreateNamespaceRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescGZIP(), []int{5} } -func (m *CreateNamespaceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Namespace.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintNamespace(dAtA, i, uint64(size)) +func (x *CreateNamespaceRequest) GetNamespace() *Namespace { + if x != nil { + return x.Namespace } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return nil } -func (m *CreateNamespaceResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} +type CreateNamespaceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *CreateNamespaceResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + Namespace *Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` } -func (m *CreateNamespaceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) +func (x *CreateNamespaceResponse) Reset() { + *x = CreateNamespaceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - { - size, err := m.Namespace.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintNamespace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil } -func (m *UpdateNamespaceRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (x *CreateNamespaceResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *UpdateNamespaceRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} +func (*CreateNamespaceResponse) ProtoMessage() {} -func (m *UpdateNamespaceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.UpdateMask != nil { - { - size, err := m.UpdateMask.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintNamespace(dAtA, i, uint64(size)) - } - 
i-- - dAtA[i] = 0x12 - } - { - size, err := m.Namespace.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err +func (x *CreateNamespaceResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - i -= size - i = encodeVarintNamespace(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *UpdateNamespaceResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *UpdateNamespaceResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use CreateNamespaceResponse.ProtoReflect.Descriptor instead. +func (*CreateNamespaceResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescGZIP(), []int{6} } -func (m *UpdateNamespaceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Namespace.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintNamespace(dAtA, i, uint64(size)) +func (x *CreateNamespaceResponse) GetNamespace() *Namespace { + if x != nil { + return x.Namespace } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return nil } -func (m *DeleteNamespaceRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} +// UpdateNamespaceRequest updates the metadata for a namespace. +// +// The operation should follow semantics described in +// https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-mask, +// unless otherwise qualified. +type UpdateNamespaceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *DeleteNamespaceRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + // Namespace provides the target value, as declared by the mask, for the update. + // + // The namespace field must be set. + Namespace *Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + // UpdateMask specifies which fields to perform the update on. If empty, + // the operation applies to all fields. + // + // For the most part, this applies only to selectively updating labels on + // the namespace. While field masks are typically limited to ascii alphas + // and digits, we just take everything after the "labels." as the map key. 
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` } -func (m *DeleteNamespaceRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa +func (x *UpdateNamespaceRequest) Reset() { + *x = UpdateNamespaceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return len(dAtA) - i, nil } -func encodeVarintNamespace(dAtA []byte, offset int, v uint64) int { - offset -= sovNamespace(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Namespace) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovNamespace(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v))) - n += mapEntrySize + 1 + sovNamespace(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n +func (x *UpdateNamespaceRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetNamespaceRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovNamespace(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} +func (*UpdateNamespaceRequest) ProtoMessage() {} -func (m *GetNamespaceResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Namespace.Size() - n += 1 + l + sovNamespace(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) +func (x *UpdateNamespaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return n + return mi.MessageOf(x) } -func (m *ListNamespacesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Filter) - if l > 0 { - n += 1 + l + sovNamespace(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n +// Deprecated: Use UpdateNamespaceRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateNamespaceRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescGZIP(), []int{7} } -func (m *ListNamespacesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Namespaces) > 0 { - for _, e := range m.Namespaces { - l = e.Size() - n += 1 + l + sovNamespace(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) +func (x *UpdateNamespaceRequest) GetNamespace() *Namespace { + if x != nil { + return x.Namespace } - return n + return nil } -func (m *CreateNamespaceRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Namespace.Size() - n += 1 + l + sovNamespace(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) +func (x *UpdateNamespaceRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask } - return n + return nil } -func (m *CreateNamespaceResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Namespace.Size() - n += 1 + l + sovNamespace(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} +type UpdateNamespaceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *UpdateNamespaceRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Namespace.Size() - n += 1 + l + sovNamespace(uint64(l)) - if m.UpdateMask != nil { - l = m.UpdateMask.Size() - n += 1 + l + sovNamespace(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n + Namespace *Namespace `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` } -func (m *UpdateNamespaceResponse) Size() (n int) { - if m == nil { - return 0 +func (x *UpdateNamespaceResponse) Reset() { + *x = UpdateNamespaceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - var l int - _ = l - l = m.Namespace.Size() - n += 1 + l + sovNamespace(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n } -func (m *DeleteNamespaceRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovNamespace(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n +func (x *UpdateNamespaceResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func sovNamespace(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozNamespace(x uint64) (n int) { - return sovNamespace(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Namespace) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&Namespace{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") 
- return s -} -func (this *GetNamespaceRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GetNamespaceRequest{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *GetNamespaceResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GetNamespaceResponse{`, - `Namespace:` + strings.Replace(strings.Replace(this.Namespace.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListNamespacesRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ListNamespacesRequest{`, - `Filter:` + fmt.Sprintf("%v", this.Filter) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListNamespacesResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForNamespaces := "[]Namespace{" - for _, f := range this.Namespaces { - repeatedStringForNamespaces += strings.Replace(strings.Replace(f.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + "," - } - repeatedStringForNamespaces += "}" - s := strings.Join([]string{`&ListNamespacesResponse{`, - `Namespaces:` + repeatedStringForNamespaces + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CreateNamespaceRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CreateNamespaceRequest{`, - `Namespace:` + strings.Replace(strings.Replace(this.Namespace.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CreateNamespaceResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CreateNamespaceResponse{`, - `Namespace:` + strings.Replace(strings.Replace(this.Namespace.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UpdateNamespaceRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UpdateNamespaceRequest{`, - `Namespace:` + strings.Replace(strings.Replace(this.Namespace.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`, - `UpdateMask:` + strings.Replace(fmt.Sprintf("%v", this.UpdateMask), "FieldMask", "types.FieldMask", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UpdateNamespaceResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UpdateNamespaceResponse{`, - `Namespace:` + strings.Replace(strings.Replace(this.Namespace.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *DeleteNamespaceRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeleteNamespaceRequest{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringNamespace(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - 
return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Namespace) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Namespace: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Namespace: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthNamespace - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthNamespace - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { 
- return ErrInvalidLengthNamespace - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthNamespace - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } +func (*UpdateNamespaceResponse) ProtoMessage() {} - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetNamespaceRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetNamespaceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetNamespaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy +func (x *UpdateNamespaceResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil + return mi.MessageOf(x) } -func (m *GetNamespaceResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetNamespaceResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetNamespaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Namespace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil +// Deprecated: Use UpdateNamespaceResponse.ProtoReflect.Descriptor instead. 
+func (*UpdateNamespaceResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescGZIP(), []int{8} } -func (m *ListNamespacesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListNamespacesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListNamespacesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *UpdateNamespaceResponse) GetNamespace() *Namespace { + if x != nil { + return x.Namespace } return nil } -func (m *ListNamespacesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListNamespacesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListNamespacesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespaces = append(m.Namespaces, Namespace{}) - if err := m.Namespaces[len(m.Namespaces)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CreateNamespaceRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CreateNamespaceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CreateNamespaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Namespace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } +type DeleteNamespaceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } -func (m *CreateNamespaceResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CreateNamespaceResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CreateNamespaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Namespace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *DeleteNamespaceRequest) Reset() { + *x = DeleteNamespaceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -func (m *UpdateNamespaceRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UpdateNamespaceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateNamespaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Namespace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdateMask", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.UpdateMask == nil { - m.UpdateMask = &types.FieldMask{} - } - if err := m.UpdateMask.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil +func (x *DeleteNamespaceRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *UpdateNamespaceResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UpdateNamespaceResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateNamespaceResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Namespace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteNamespaceRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteNamespaceRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteNamespaceRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowNamespace - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthNamespace - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthNamespace - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipNamespace(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthNamespace - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } +func (*DeleteNamespaceRequest) ProtoMessage() {} - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipNamespace(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowNamespace - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowNamespace - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowNamespace - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthNamespace - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupNamespace - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthNamespace - } - if depth == 0 { - return iNdEx, nil +func (x *DeleteNamespaceRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } - return 0, io.ErrUnexpectedEOF + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteNamespaceRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteNamespaceRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescGZIP(), []int{9} +} + +func (x *DeleteNamespaceRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +var File_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDesc = []byte{ + 0x0a, 0x4b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x21, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, + 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xac, 0x01, 0x0a, 0x09, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x50, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x38, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x2e, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x29, + 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x62, 0x0a, 0x14, 0x47, 0x65, 0x74, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x4a, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x2f, 0x0a, + 0x15, 
0x4c, 0x69, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x66, + 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4c, 0x0a, 0x0a, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x0a, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x22, 0x64, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x4a, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x65, 0x0a, 0x17, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x22, 0xa1, 0x01, 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4a, + 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, + 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x65, 0x0a, 0x17, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x73, 0x65, 
0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0x2c, + 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x32, 0xe0, 0x04, 0x0a, + 0x0a, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x76, 0x0a, 0x03, 0x47, + 0x65, 0x74, 0x12, 0x36, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, + 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x38, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x7f, 0x0a, 0x06, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x39, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x7f, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x39, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2e, 0x76, 
0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x5b, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x39, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, + 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x2f, 0x76, 0x31, 0x3b, 0x6e, + 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( - ErrInvalidLengthNamespace = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowNamespace = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupNamespace = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescData = file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes = make([]protoimpl.MessageInfo, 11) +var file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_goTypes = []interface{}{ + (*Namespace)(nil), // 0: containerd.services.namespaces.v1.Namespace + (*GetNamespaceRequest)(nil), // 1: containerd.services.namespaces.v1.GetNamespaceRequest + (*GetNamespaceResponse)(nil), // 2: containerd.services.namespaces.v1.GetNamespaceResponse + (*ListNamespacesRequest)(nil), // 3: containerd.services.namespaces.v1.ListNamespacesRequest + (*ListNamespacesResponse)(nil), // 4: containerd.services.namespaces.v1.ListNamespacesResponse + (*CreateNamespaceRequest)(nil), // 5: containerd.services.namespaces.v1.CreateNamespaceRequest + (*CreateNamespaceResponse)(nil), // 6: containerd.services.namespaces.v1.CreateNamespaceResponse + (*UpdateNamespaceRequest)(nil), // 7: containerd.services.namespaces.v1.UpdateNamespaceRequest + (*UpdateNamespaceResponse)(nil), // 8: containerd.services.namespaces.v1.UpdateNamespaceResponse + (*DeleteNamespaceRequest)(nil), // 9: containerd.services.namespaces.v1.DeleteNamespaceRequest + nil, // 10: 
containerd.services.namespaces.v1.Namespace.LabelsEntry + (*fieldmaskpb.FieldMask)(nil), // 11: google.protobuf.FieldMask + (*emptypb.Empty)(nil), // 12: google.protobuf.Empty +} +var file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_depIdxs = []int32{ + 10, // 0: containerd.services.namespaces.v1.Namespace.labels:type_name -> containerd.services.namespaces.v1.Namespace.LabelsEntry + 0, // 1: containerd.services.namespaces.v1.GetNamespaceResponse.namespace:type_name -> containerd.services.namespaces.v1.Namespace + 0, // 2: containerd.services.namespaces.v1.ListNamespacesResponse.namespaces:type_name -> containerd.services.namespaces.v1.Namespace + 0, // 3: containerd.services.namespaces.v1.CreateNamespaceRequest.namespace:type_name -> containerd.services.namespaces.v1.Namespace + 0, // 4: containerd.services.namespaces.v1.CreateNamespaceResponse.namespace:type_name -> containerd.services.namespaces.v1.Namespace + 0, // 5: containerd.services.namespaces.v1.UpdateNamespaceRequest.namespace:type_name -> containerd.services.namespaces.v1.Namespace + 11, // 6: containerd.services.namespaces.v1.UpdateNamespaceRequest.update_mask:type_name -> google.protobuf.FieldMask + 0, // 7: containerd.services.namespaces.v1.UpdateNamespaceResponse.namespace:type_name -> containerd.services.namespaces.v1.Namespace + 1, // 8: containerd.services.namespaces.v1.Namespaces.Get:input_type -> containerd.services.namespaces.v1.GetNamespaceRequest + 3, // 9: containerd.services.namespaces.v1.Namespaces.List:input_type -> containerd.services.namespaces.v1.ListNamespacesRequest + 5, // 10: containerd.services.namespaces.v1.Namespaces.Create:input_type -> containerd.services.namespaces.v1.CreateNamespaceRequest + 7, // 11: containerd.services.namespaces.v1.Namespaces.Update:input_type -> containerd.services.namespaces.v1.UpdateNamespaceRequest + 9, // 12: containerd.services.namespaces.v1.Namespaces.Delete:input_type -> containerd.services.namespaces.v1.DeleteNamespaceRequest + 2, // 13: containerd.services.namespaces.v1.Namespaces.Get:output_type -> containerd.services.namespaces.v1.GetNamespaceResponse + 4, // 14: containerd.services.namespaces.v1.Namespaces.List:output_type -> containerd.services.namespaces.v1.ListNamespacesResponse + 6, // 15: containerd.services.namespaces.v1.Namespaces.Create:output_type -> containerd.services.namespaces.v1.CreateNamespaceResponse + 8, // 16: containerd.services.namespaces.v1.Namespaces.Update:output_type -> containerd.services.namespaces.v1.UpdateNamespaceResponse + 12, // 17: containerd.services.namespaces.v1.Namespaces.Delete:output_type -> google.protobuf.Empty + 13, // [13:18] is the sub-list for method output_type + 8, // [8:13] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_init() } +func file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_init() { + if File_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Namespace); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetNamespaceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetNamespaceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListNamespacesRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListNamespacesResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateNamespaceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateNamespaceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateNamespaceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateNamespaceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteNamespaceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDesc, + NumEnums: 0, + NumMessages: 11, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_msgTypes, + 
}.Build() + File_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto = out.File + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_rawDesc = nil + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_goTypes = nil + file_github_com_containerd_containerd_api_services_namespaces_v1_namespace_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto index 90e3051238b6f..910bcd6c72088 100644 --- a/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto +++ b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto @@ -18,7 +18,6 @@ syntax = "proto3"; package containerd.services.namespaces.v1; -import weak "gogoproto/gogo.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/field_mask.proto"; @@ -60,7 +59,7 @@ message GetNamespaceRequest { } message GetNamespaceResponse { - Namespace namespace = 1 [(gogoproto.nullable) = false]; + Namespace namespace = 1; } message ListNamespacesRequest { @@ -68,15 +67,15 @@ message ListNamespacesRequest { } message ListNamespacesResponse { - repeated Namespace namespaces = 1 [(gogoproto.nullable) = false]; + repeated Namespace namespaces = 1; } message CreateNamespaceRequest { - Namespace namespace = 1 [(gogoproto.nullable) = false]; + Namespace namespace = 1; } message CreateNamespaceResponse { - Namespace namespace = 1 [(gogoproto.nullable) = false]; + Namespace namespace = 1; } // UpdateNamespaceRequest updates the metadata for a namespace. @@ -88,7 +87,7 @@ message UpdateNamespaceRequest { // Namespace provides the target value, as declared by the mask, for the update. // // The namespace field must be set. - Namespace namespace = 1 [(gogoproto.nullable) = false]; + Namespace namespace = 1; // UpdateMask specifies which fields to perform the update on. If empty, // the operation applies to all fields. @@ -100,7 +99,7 @@ message UpdateNamespaceRequest { } message UpdateNamespaceResponse { - Namespace namespace = 1 [(gogoproto.nullable) = false]; + Namespace namespace = 1; } message DeleteNamespaceRequest { diff --git a/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace_grpc.pb.go new file mode 100644 index 0000000000000..ed4e4c2ffe598 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace_grpc.pb.go @@ -0,0 +1,250 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto + +package namespaces + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// NamespacesClient is the client API for Namespaces service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type NamespacesClient interface { + Get(ctx context.Context, in *GetNamespaceRequest, opts ...grpc.CallOption) (*GetNamespaceResponse, error) + List(ctx context.Context, in *ListNamespacesRequest, opts ...grpc.CallOption) (*ListNamespacesResponse, error) + Create(ctx context.Context, in *CreateNamespaceRequest, opts ...grpc.CallOption) (*CreateNamespaceResponse, error) + Update(ctx context.Context, in *UpdateNamespaceRequest, opts ...grpc.CallOption) (*UpdateNamespaceResponse, error) + Delete(ctx context.Context, in *DeleteNamespaceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) +} + +type namespacesClient struct { + cc grpc.ClientConnInterface +} + +func NewNamespacesClient(cc grpc.ClientConnInterface) NamespacesClient { + return &namespacesClient{cc} +} + +func (c *namespacesClient) Get(ctx context.Context, in *GetNamespaceRequest, opts ...grpc.CallOption) (*GetNamespaceResponse, error) { + out := new(GetNamespaceResponse) + err := c.cc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *namespacesClient) List(ctx context.Context, in *ListNamespacesRequest, opts ...grpc.CallOption) (*ListNamespacesResponse, error) { + out := new(ListNamespacesResponse) + err := c.cc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *namespacesClient) Create(ctx context.Context, in *CreateNamespaceRequest, opts ...grpc.CallOption) (*CreateNamespaceResponse, error) { + out := new(CreateNamespaceResponse) + err := c.cc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *namespacesClient) Update(ctx context.Context, in *UpdateNamespaceRequest, opts ...grpc.CallOption) (*UpdateNamespaceResponse, error) { + out := new(UpdateNamespaceResponse) + err := c.cc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Update", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *namespacesClient) Delete(ctx context.Context, in *DeleteNamespaceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// NamespacesServer is the server API for Namespaces service. +// All implementations must embed UnimplementedNamespacesServer +// for forward compatibility +type NamespacesServer interface { + Get(context.Context, *GetNamespaceRequest) (*GetNamespaceResponse, error) + List(context.Context, *ListNamespacesRequest) (*ListNamespacesResponse, error) + Create(context.Context, *CreateNamespaceRequest) (*CreateNamespaceResponse, error) + Update(context.Context, *UpdateNamespaceRequest) (*UpdateNamespaceResponse, error) + Delete(context.Context, *DeleteNamespaceRequest) (*emptypb.Empty, error) + mustEmbedUnimplementedNamespacesServer() +} + +// UnimplementedNamespacesServer must be embedded to have forward compatible implementations. 
+type UnimplementedNamespacesServer struct { +} + +func (UnimplementedNamespacesServer) Get(context.Context, *GetNamespaceRequest) (*GetNamespaceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (UnimplementedNamespacesServer) List(context.Context, *ListNamespacesRequest) (*ListNamespacesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method List not implemented") +} +func (UnimplementedNamespacesServer) Create(context.Context, *CreateNamespaceRequest) (*CreateNamespaceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") +} +func (UnimplementedNamespacesServer) Update(context.Context, *UpdateNamespaceRequest) (*UpdateNamespaceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") +} +func (UnimplementedNamespacesServer) Delete(context.Context, *DeleteNamespaceRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (UnimplementedNamespacesServer) mustEmbedUnimplementedNamespacesServer() {} + +// UnsafeNamespacesServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to NamespacesServer will +// result in compilation errors. +type UnsafeNamespacesServer interface { + mustEmbedUnimplementedNamespacesServer() +} + +func RegisterNamespacesServer(s grpc.ServiceRegistrar, srv NamespacesServer) { + s.RegisterService(&Namespaces_ServiceDesc, srv) +} + +func _Namespaces_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNamespaceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NamespacesServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.namespaces.v1.Namespaces/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NamespacesServer).Get(ctx, req.(*GetNamespaceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Namespaces_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNamespacesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NamespacesServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.namespaces.v1.Namespaces/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NamespacesServer).List(ctx, req.(*ListNamespacesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Namespaces_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateNamespaceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NamespacesServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.namespaces.v1.Namespaces/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NamespacesServer).Create(ctx, req.(*CreateNamespaceRequest)) + } + return interceptor(ctx, in, info, handler) +} + 
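// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the generated file above: one way a caller
// of this package might implement and register the Namespaces service. As the
// generated comments note, implementations must embed
// UnimplementedNamespacesServer for forward compatibility, so any method that
// is not overridden answers with codes.Unimplemented. The listener address is
// an assumption for the example, and GetNamespaceRequest.Name refers to the
// request's generated name field defined earlier in this file.

package main

import (
	"context"
	"log"
	"net"

	api "github.com/containerd/containerd/api/services/namespaces/v1"
	"google.golang.org/grpc"
)

type nsServer struct {
	api.UnimplementedNamespacesServer // forward compatibility
}

// Get overrides a single method; List/Create/Update/Delete fall back to the
// embedded Unimplemented stubs.
func (nsServer) Get(ctx context.Context, req *api.GetNamespaceRequest) (*api.GetNamespaceResponse, error) {
	return &api.GetNamespaceResponse{
		Namespace: &api.Namespace{Name: req.Name}, // echo the requested name
	}, nil
}

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:0") // illustrative address
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	api.RegisterNamespacesServer(s, nsServer{})
	log.Fatal(s.Serve(l))
}
// ---------------------------------------------------------------------------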
+func _Namespaces_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateNamespaceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NamespacesServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.namespaces.v1.Namespaces/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NamespacesServer).Update(ctx, req.(*UpdateNamespaceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Namespaces_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteNamespaceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NamespacesServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.namespaces.v1.Namespaces/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NamespacesServer).Delete(ctx, req.(*DeleteNamespaceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Namespaces_ServiceDesc is the grpc.ServiceDesc for Namespaces service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Namespaces_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.namespaces.v1.Namespaces", + HandlerType: (*NamespacesServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _Namespaces_Get_Handler, + }, + { + MethodName: "List", + Handler: _Namespaces_List_Handler, + }, + { + MethodName: "Create", + Handler: _Namespaces_Create_Handler, + }, + { + MethodName: "Update", + Handler: _Namespaces_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _Namespaces_Delete_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/services/sandbox/v1/doc.go b/vendor/github.com/containerd/containerd/api/services/sandbox/v1/doc.go new file mode 100644 index 0000000000000..f960350c1613c --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/sandbox/v1/doc.go @@ -0,0 +1,17 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sandbox diff --git a/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox.pb.go b/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox.pb.go new file mode 100644 index 0000000000000..cf11797947d22 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox.pb.go @@ -0,0 +1,1938 @@ +// +//Copyright The containerd Authors. 
+// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/sandbox/v1/sandbox.proto + +// Sandbox is a v2 runtime extension that allows more complex execution environments for containers. +// This adds a notion of groups of containers that share same lifecycle and/or resources. +// A few good fits for sandbox can be: +// - A "pause" container in k8s, that acts as a parent process for child containers to hold network namespace. +// - (micro)VMs that launch a VM process and executes containers inside guest OS. +// containerd in this case remains implementation agnostic and delegates sandbox handling to runtimes. +// See proposal and discussion here: https://github.com/containerd/containerd/issues/4131 + +package sandbox + +import ( + types "github.com/containerd/containerd/api/types" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type StoreCreateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Sandbox *types.Sandbox `protobuf:"bytes,1,opt,name=sandbox,proto3" json:"sandbox,omitempty"` +} + +func (x *StoreCreateRequest) Reset() { + *x = StoreCreateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StoreCreateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StoreCreateRequest) ProtoMessage() {} + +func (x *StoreCreateRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StoreCreateRequest.ProtoReflect.Descriptor instead. 
+func (*StoreCreateRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{0} +} + +func (x *StoreCreateRequest) GetSandbox() *types.Sandbox { + if x != nil { + return x.Sandbox + } + return nil +} + +type StoreCreateResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Sandbox *types.Sandbox `protobuf:"bytes,1,opt,name=sandbox,proto3" json:"sandbox,omitempty"` +} + +func (x *StoreCreateResponse) Reset() { + *x = StoreCreateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StoreCreateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StoreCreateResponse) ProtoMessage() {} + +func (x *StoreCreateResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StoreCreateResponse.ProtoReflect.Descriptor instead. +func (*StoreCreateResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{1} +} + +func (x *StoreCreateResponse) GetSandbox() *types.Sandbox { + if x != nil { + return x.Sandbox + } + return nil +} + +type StoreUpdateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Sandbox *types.Sandbox `protobuf:"bytes,1,opt,name=sandbox,proto3" json:"sandbox,omitempty"` + Fields []string `protobuf:"bytes,2,rep,name=fields,proto3" json:"fields,omitempty"` +} + +func (x *StoreUpdateRequest) Reset() { + *x = StoreUpdateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StoreUpdateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StoreUpdateRequest) ProtoMessage() {} + +func (x *StoreUpdateRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StoreUpdateRequest.ProtoReflect.Descriptor instead. 
+func (*StoreUpdateRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{2} +} + +func (x *StoreUpdateRequest) GetSandbox() *types.Sandbox { + if x != nil { + return x.Sandbox + } + return nil +} + +func (x *StoreUpdateRequest) GetFields() []string { + if x != nil { + return x.Fields + } + return nil +} + +type StoreUpdateResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Sandbox *types.Sandbox `protobuf:"bytes,1,opt,name=sandbox,proto3" json:"sandbox,omitempty"` +} + +func (x *StoreUpdateResponse) Reset() { + *x = StoreUpdateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StoreUpdateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StoreUpdateResponse) ProtoMessage() {} + +func (x *StoreUpdateResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StoreUpdateResponse.ProtoReflect.Descriptor instead. +func (*StoreUpdateResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{3} +} + +func (x *StoreUpdateResponse) GetSandbox() *types.Sandbox { + if x != nil { + return x.Sandbox + } + return nil +} + +type StoreDeleteRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` +} + +func (x *StoreDeleteRequest) Reset() { + *x = StoreDeleteRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StoreDeleteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StoreDeleteRequest) ProtoMessage() {} + +func (x *StoreDeleteRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StoreDeleteRequest.ProtoReflect.Descriptor instead. 
+func (*StoreDeleteRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{4} +} + +func (x *StoreDeleteRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +type StoreDeleteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *StoreDeleteResponse) Reset() { + *x = StoreDeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StoreDeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StoreDeleteResponse) ProtoMessage() {} + +func (x *StoreDeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StoreDeleteResponse.ProtoReflect.Descriptor instead. +func (*StoreDeleteResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{5} +} + +type StoreListRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` +} + +func (x *StoreListRequest) Reset() { + *x = StoreListRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StoreListRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StoreListRequest) ProtoMessage() {} + +func (x *StoreListRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StoreListRequest.ProtoReflect.Descriptor instead. 
+func (*StoreListRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{6} +} + +func (x *StoreListRequest) GetFilters() []string { + if x != nil { + return x.Filters + } + return nil +} + +type StoreListResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + List []*types.Sandbox `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` +} + +func (x *StoreListResponse) Reset() { + *x = StoreListResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StoreListResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StoreListResponse) ProtoMessage() {} + +func (x *StoreListResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StoreListResponse.ProtoReflect.Descriptor instead. +func (*StoreListResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{7} +} + +func (x *StoreListResponse) GetList() []*types.Sandbox { + if x != nil { + return x.List + } + return nil +} + +type StoreGetRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` +} + +func (x *StoreGetRequest) Reset() { + *x = StoreGetRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StoreGetRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StoreGetRequest) ProtoMessage() {} + +func (x *StoreGetRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StoreGetRequest.ProtoReflect.Descriptor instead. 
+func (*StoreGetRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{8} +} + +func (x *StoreGetRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +type StoreGetResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Sandbox *types.Sandbox `protobuf:"bytes,1,opt,name=sandbox,proto3" json:"sandbox,omitempty"` +} + +func (x *StoreGetResponse) Reset() { + *x = StoreGetResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StoreGetResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StoreGetResponse) ProtoMessage() {} + +func (x *StoreGetResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StoreGetResponse.ProtoReflect.Descriptor instead. +func (*StoreGetResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{9} +} + +func (x *StoreGetResponse) GetSandbox() *types.Sandbox { + if x != nil { + return x.Sandbox + } + return nil +} + +type ControllerCreateRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + Rootfs []*types.Mount `protobuf:"bytes,2,rep,name=rootfs,proto3" json:"rootfs,omitempty"` + Options *anypb.Any `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"` +} + +func (x *ControllerCreateRequest) Reset() { + *x = ControllerCreateRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControllerCreateRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControllerCreateRequest) ProtoMessage() {} + +func (x *ControllerCreateRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControllerCreateRequest.ProtoReflect.Descriptor instead. 
+func (*ControllerCreateRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{10} +} + +func (x *ControllerCreateRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +func (x *ControllerCreateRequest) GetRootfs() []*types.Mount { + if x != nil { + return x.Rootfs + } + return nil +} + +func (x *ControllerCreateRequest) GetOptions() *anypb.Any { + if x != nil { + return x.Options + } + return nil +} + +type ControllerCreateResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` +} + +func (x *ControllerCreateResponse) Reset() { + *x = ControllerCreateResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControllerCreateResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControllerCreateResponse) ProtoMessage() {} + +func (x *ControllerCreateResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControllerCreateResponse.ProtoReflect.Descriptor instead. +func (*ControllerCreateResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{11} +} + +func (x *ControllerCreateResponse) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +type ControllerStartRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` +} + +func (x *ControllerStartRequest) Reset() { + *x = ControllerStartRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControllerStartRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControllerStartRequest) ProtoMessage() {} + +func (x *ControllerStartRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControllerStartRequest.ProtoReflect.Descriptor instead. 
+func (*ControllerStartRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{12} +} + +func (x *ControllerStartRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +type ControllerStartResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ControllerStartResponse) Reset() { + *x = ControllerStartResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControllerStartResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControllerStartResponse) ProtoMessage() {} + +func (x *ControllerStartResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControllerStartResponse.ProtoReflect.Descriptor instead. 
+func (*ControllerStartResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{13} +} + +func (x *ControllerStartResponse) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +func (x *ControllerStartResponse) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} + +func (x *ControllerStartResponse) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil +} + +func (x *ControllerStartResponse) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +type ControllerPlatformRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` +} + +func (x *ControllerPlatformRequest) Reset() { + *x = ControllerPlatformRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControllerPlatformRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControllerPlatformRequest) ProtoMessage() {} + +func (x *ControllerPlatformRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControllerPlatformRequest.ProtoReflect.Descriptor instead. +func (*ControllerPlatformRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{14} +} + +func (x *ControllerPlatformRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +type ControllerPlatformResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Platform *types.Platform `protobuf:"bytes,1,opt,name=platform,proto3" json:"platform,omitempty"` +} + +func (x *ControllerPlatformResponse) Reset() { + *x = ControllerPlatformResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControllerPlatformResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControllerPlatformResponse) ProtoMessage() {} + +func (x *ControllerPlatformResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControllerPlatformResponse.ProtoReflect.Descriptor instead. 
+func (*ControllerPlatformResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{15} +} + +func (x *ControllerPlatformResponse) GetPlatform() *types.Platform { + if x != nil { + return x.Platform + } + return nil +} + +type ControllerStopRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + TimeoutSecs uint32 `protobuf:"varint,2,opt,name=timeout_secs,json=timeoutSecs,proto3" json:"timeout_secs,omitempty"` +} + +func (x *ControllerStopRequest) Reset() { + *x = ControllerStopRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControllerStopRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControllerStopRequest) ProtoMessage() {} + +func (x *ControllerStopRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControllerStopRequest.ProtoReflect.Descriptor instead. +func (*ControllerStopRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{16} +} + +func (x *ControllerStopRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +func (x *ControllerStopRequest) GetTimeoutSecs() uint32 { + if x != nil { + return x.TimeoutSecs + } + return 0 +} + +type ControllerStopResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ControllerStopResponse) Reset() { + *x = ControllerStopResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControllerStopResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControllerStopResponse) ProtoMessage() {} + +func (x *ControllerStopResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControllerStopResponse.ProtoReflect.Descriptor instead. 
+func (*ControllerStopResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{17} +} + +type ControllerWaitRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` +} + +func (x *ControllerWaitRequest) Reset() { + *x = ControllerWaitRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControllerWaitRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControllerWaitRequest) ProtoMessage() {} + +func (x *ControllerWaitRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControllerWaitRequest.ProtoReflect.Descriptor instead. +func (*ControllerWaitRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{18} +} + +func (x *ControllerWaitRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +type ControllerWaitResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ExitStatus uint32 `protobuf:"varint,1,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=exited_at,json=exitedAt,proto3" json:"exited_at,omitempty"` +} + +func (x *ControllerWaitResponse) Reset() { + *x = ControllerWaitResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControllerWaitResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControllerWaitResponse) ProtoMessage() {} + +func (x *ControllerWaitResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControllerWaitResponse.ProtoReflect.Descriptor instead. 
+func (*ControllerWaitResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{19} +} + +func (x *ControllerWaitResponse) GetExitStatus() uint32 { + if x != nil { + return x.ExitStatus + } + return 0 +} + +func (x *ControllerWaitResponse) GetExitedAt() *timestamppb.Timestamp { + if x != nil { + return x.ExitedAt + } + return nil +} + +type ControllerStatusRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + Verbose bool `protobuf:"varint,2,opt,name=verbose,proto3" json:"verbose,omitempty"` +} + +func (x *ControllerStatusRequest) Reset() { + *x = ControllerStatusRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControllerStatusRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControllerStatusRequest) ProtoMessage() {} + +func (x *ControllerStatusRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControllerStatusRequest.ProtoReflect.Descriptor instead. +func (*ControllerStatusRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{20} +} + +func (x *ControllerStatusRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +func (x *ControllerStatusRequest) GetVerbose() bool { + if x != nil { + return x.Verbose + } + return false +} + +type ControllerStatusResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` + State string `protobuf:"bytes,3,opt,name=state,proto3" json:"state,omitempty"` + Info map[string]string `protobuf:"bytes,4,rep,name=info,proto3" json:"info,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + ExitedAt *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=exited_at,json=exitedAt,proto3" json:"exited_at,omitempty"` + Extra *anypb.Any `protobuf:"bytes,7,opt,name=extra,proto3" json:"extra,omitempty"` +} + +func (x *ControllerStatusResponse) Reset() { + *x = ControllerStatusResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControllerStatusResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControllerStatusResponse) ProtoMessage() {} + +func (x *ControllerStatusResponse) ProtoReflect() protoreflect.Message { + mi := 
&file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControllerStatusResponse.ProtoReflect.Descriptor instead. +func (*ControllerStatusResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{21} +} + +func (x *ControllerStatusResponse) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +func (x *ControllerStatusResponse) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} + +func (x *ControllerStatusResponse) GetState() string { + if x != nil { + return x.State + } + return "" +} + +func (x *ControllerStatusResponse) GetInfo() map[string]string { + if x != nil { + return x.Info + } + return nil +} + +func (x *ControllerStatusResponse) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil +} + +func (x *ControllerStatusResponse) GetExitedAt() *timestamppb.Timestamp { + if x != nil { + return x.ExitedAt + } + return nil +} + +func (x *ControllerStatusResponse) GetExtra() *anypb.Any { + if x != nil { + return x.Extra + } + return nil +} + +type ControllerShutdownRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` +} + +func (x *ControllerShutdownRequest) Reset() { + *x = ControllerShutdownRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControllerShutdownRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControllerShutdownRequest) ProtoMessage() {} + +func (x *ControllerShutdownRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControllerShutdownRequest.ProtoReflect.Descriptor instead. 
+func (*ControllerShutdownRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{22} +} + +func (x *ControllerShutdownRequest) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +type ControllerShutdownResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ControllerShutdownResponse) Reset() { + *x = ControllerShutdownResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ControllerShutdownResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ControllerShutdownResponse) ProtoMessage() {} + +func (x *ControllerShutdownResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ControllerShutdownResponse.ProtoReflect.Descriptor instead. +func (*ControllerShutdownResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP(), []int{23} +} + +var File_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDesc = []byte{ + 0x0a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x61, 0x6e, 0x64, 0x62, + 0x6f, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, + 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2f, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x36, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6d, 0x6f, 0x75, 0x6e, 0x74, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 
0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2f, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x49, 0x0a, 0x12, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, + 0x6f, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x61, 0x6e, 0x64, + 0x62, 0x6f, 0x78, 0x52, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x22, 0x4a, 0x0a, 0x13, + 0x53, 0x74, 0x6f, 0x72, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, + 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x22, 0x61, 0x0a, 0x12, 0x53, 0x74, 0x6f, 0x72, + 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x33, + 0x0a, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x07, 0x73, 0x61, 0x6e, 0x64, + 0x62, 0x6f, 0x78, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x22, 0x4a, 0x0a, 0x13, 0x53, + 0x74, 0x6f, 0x72, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x07, + 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x22, 0x33, 0x0a, 0x12, 0x53, 0x74, 0x6f, 0x72, 0x65, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, + 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x15, 0x0a, 0x13, + 0x53, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x2c, 0x0a, 0x10, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x73, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x73, 0x22, 0x42, 0x0a, 0x11, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, + 0x04, 0x6c, 0x69, 0x73, 0x74, 0x22, 0x30, 0x0a, 0x0f, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x47, 0x65, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, + 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, + 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x47, 0x0a, 0x10, 0x53, 0x74, 0x6f, 0x72, 0x65, + 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x73, + 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, + 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x52, 0x07, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, + 0x22, 0x99, 0x01, 0x0a, 0x17, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, + 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x2f, 0x0a, 0x06, 0x72, + 0x6f, 0x6f, 0x74, 0x66, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4d, + 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x06, 0x72, 0x6f, 0x6f, 0x74, 0x66, 0x73, 0x12, 0x2e, 0x0a, 0x07, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x41, 0x6e, 0x79, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x39, 0x0a, 0x18, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, + 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, + 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x37, 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, + 0x22, 0x9d, 0x02, 0x0a, 0x17, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, + 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, + 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x39, 0x0a, + 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x5b, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, + 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 
0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x3a, 0x0a, 0x19, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x50, 0x6c, + 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, + 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x54, 0x0a, 0x1a, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, + 0x72, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x08, 0x70, 0x6c, + 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, + 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, + 0x72, 0x6d, 0x22, 0x59, 0x0a, 0x15, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, + 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, + 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x73, 0x22, 0x18, 0x0a, + 0x16, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x6f, 0x70, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x36, 0x0a, 0x15, 0x43, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x57, 0x61, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, + 0x72, 0x0a, 0x16, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x57, 0x61, 0x69, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x69, + 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, + 0x65, 0x78, 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x09, 0x65, 0x78, + 0x69, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78, 0x69, 0x74, 0x65, + 0x64, 0x41, 0x74, 0x22, 0x52, 0x0a, 0x17, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, + 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, + 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x12, 0x18, 
0x0a, + 0x07, 0x76, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, + 0x76, 0x65, 0x72, 0x62, 0x6f, 0x73, 0x65, 0x22, 0x92, 0x03, 0x0a, 0x18, 0x43, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, + 0x78, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x56, 0x0a, 0x04, 0x69, + 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, + 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x69, + 0x6e, 0x66, 0x6f, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x37, + 0x0a, 0x09, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, + 0x78, 0x69, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x78, 0x74, 0x72, 0x61, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x65, 0x78, + 0x74, 0x72, 0x61, 0x1a, 0x37, 0x0a, 0x09, 0x49, 0x6e, 0x66, 0x6f, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3a, 0x0a, 0x19, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, + 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, + 0x64, 0x62, 0x6f, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, + 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x49, 0x64, 0x22, 0x1c, 0x0a, 0x1a, 0x43, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xb7, 0x04, 0x0a, 0x05, 0x53, 0x74, 0x6f, 0x72, 0x65, + 0x12, 0x71, 0x0a, 0x06, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x32, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x6f, 0x72, + 0x65, 0x43, 0x72, 
0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x74, 0x6f, 0x72, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x71, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x32, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, + 0x74, 0x6f, 0x72, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x33, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x71, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x12, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, + 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6b, 0x0a, 0x04, 0x4c, 0x69, 0x73, + 0x74, 0x12, 0x30, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, + 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x68, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x2f, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x53, + 0x74, 0x6f, 0x72, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x74, 0x6f, 0x72, 0x65, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x32, 0xf6, 0x06, 0x0a, 0x0a, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x12, + 0x7b, 0x0a, 0x06, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x37, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, + 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x43, 0x72, 
0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x05, + 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x36, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, + 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, + 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x81, 0x01, 0x0a, 0x08, 0x50, 0x6c, 0x61, 0x74, 0x66, + 0x6f, 0x72, 0x6d, 0x12, 0x39, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, + 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x50, + 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, + 0x72, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x75, 0x0a, 0x04, 0x53, 0x74, + 0x6f, 0x70, 0x12, 0x35, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, + 0x6f, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, + 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x6f, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x75, 0x0a, 0x04, 0x57, 0x61, 0x69, 0x74, 0x12, 0x35, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, + 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x57, 0x61, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x36, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, + 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x57, 0x61, 0x69, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x37, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 
0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x81, 0x01, 0x0a, 0x08, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, + 0x77, 0x6e, 0x12, 0x39, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x68, + 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x76, 0x31, 0x2e, 0x43, + 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x42, 0x5a, 0x40, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x73, 0x61, 0x6e, 0x64, 0x62, + 0x6f, 0x78, 0x2f, 0x76, 0x31, 0x3b, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescData = file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDesc +) + +func file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes = make([]protoimpl.MessageInfo, 26) +var file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_goTypes = []interface{}{ + (*StoreCreateRequest)(nil), // 0: containerd.services.sandbox.v1.StoreCreateRequest + (*StoreCreateResponse)(nil), // 1: containerd.services.sandbox.v1.StoreCreateResponse + (*StoreUpdateRequest)(nil), // 2: containerd.services.sandbox.v1.StoreUpdateRequest + (*StoreUpdateResponse)(nil), // 3: containerd.services.sandbox.v1.StoreUpdateResponse + (*StoreDeleteRequest)(nil), // 4: containerd.services.sandbox.v1.StoreDeleteRequest + (*StoreDeleteResponse)(nil), // 5: containerd.services.sandbox.v1.StoreDeleteResponse + (*StoreListRequest)(nil), // 6: 
containerd.services.sandbox.v1.StoreListRequest + (*StoreListResponse)(nil), // 7: containerd.services.sandbox.v1.StoreListResponse + (*StoreGetRequest)(nil), // 8: containerd.services.sandbox.v1.StoreGetRequest + (*StoreGetResponse)(nil), // 9: containerd.services.sandbox.v1.StoreGetResponse + (*ControllerCreateRequest)(nil), // 10: containerd.services.sandbox.v1.ControllerCreateRequest + (*ControllerCreateResponse)(nil), // 11: containerd.services.sandbox.v1.ControllerCreateResponse + (*ControllerStartRequest)(nil), // 12: containerd.services.sandbox.v1.ControllerStartRequest + (*ControllerStartResponse)(nil), // 13: containerd.services.sandbox.v1.ControllerStartResponse + (*ControllerPlatformRequest)(nil), // 14: containerd.services.sandbox.v1.ControllerPlatformRequest + (*ControllerPlatformResponse)(nil), // 15: containerd.services.sandbox.v1.ControllerPlatformResponse + (*ControllerStopRequest)(nil), // 16: containerd.services.sandbox.v1.ControllerStopRequest + (*ControllerStopResponse)(nil), // 17: containerd.services.sandbox.v1.ControllerStopResponse + (*ControllerWaitRequest)(nil), // 18: containerd.services.sandbox.v1.ControllerWaitRequest + (*ControllerWaitResponse)(nil), // 19: containerd.services.sandbox.v1.ControllerWaitResponse + (*ControllerStatusRequest)(nil), // 20: containerd.services.sandbox.v1.ControllerStatusRequest + (*ControllerStatusResponse)(nil), // 21: containerd.services.sandbox.v1.ControllerStatusResponse + (*ControllerShutdownRequest)(nil), // 22: containerd.services.sandbox.v1.ControllerShutdownRequest + (*ControllerShutdownResponse)(nil), // 23: containerd.services.sandbox.v1.ControllerShutdownResponse + nil, // 24: containerd.services.sandbox.v1.ControllerStartResponse.LabelsEntry + nil, // 25: containerd.services.sandbox.v1.ControllerStatusResponse.InfoEntry + (*types.Sandbox)(nil), // 26: containerd.types.Sandbox + (*types.Mount)(nil), // 27: containerd.types.Mount + (*anypb.Any)(nil), // 28: google.protobuf.Any + (*timestamppb.Timestamp)(nil), // 29: google.protobuf.Timestamp + (*types.Platform)(nil), // 30: containerd.types.Platform +} +var file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_depIdxs = []int32{ + 26, // 0: containerd.services.sandbox.v1.StoreCreateRequest.sandbox:type_name -> containerd.types.Sandbox + 26, // 1: containerd.services.sandbox.v1.StoreCreateResponse.sandbox:type_name -> containerd.types.Sandbox + 26, // 2: containerd.services.sandbox.v1.StoreUpdateRequest.sandbox:type_name -> containerd.types.Sandbox + 26, // 3: containerd.services.sandbox.v1.StoreUpdateResponse.sandbox:type_name -> containerd.types.Sandbox + 26, // 4: containerd.services.sandbox.v1.StoreListResponse.list:type_name -> containerd.types.Sandbox + 26, // 5: containerd.services.sandbox.v1.StoreGetResponse.sandbox:type_name -> containerd.types.Sandbox + 27, // 6: containerd.services.sandbox.v1.ControllerCreateRequest.rootfs:type_name -> containerd.types.Mount + 28, // 7: containerd.services.sandbox.v1.ControllerCreateRequest.options:type_name -> google.protobuf.Any + 29, // 8: containerd.services.sandbox.v1.ControllerStartResponse.created_at:type_name -> google.protobuf.Timestamp + 24, // 9: containerd.services.sandbox.v1.ControllerStartResponse.labels:type_name -> containerd.services.sandbox.v1.ControllerStartResponse.LabelsEntry + 30, // 10: containerd.services.sandbox.v1.ControllerPlatformResponse.platform:type_name -> containerd.types.Platform + 29, // 11: containerd.services.sandbox.v1.ControllerWaitResponse.exited_at:type_name -> 
google.protobuf.Timestamp + 25, // 12: containerd.services.sandbox.v1.ControllerStatusResponse.info:type_name -> containerd.services.sandbox.v1.ControllerStatusResponse.InfoEntry + 29, // 13: containerd.services.sandbox.v1.ControllerStatusResponse.created_at:type_name -> google.protobuf.Timestamp + 29, // 14: containerd.services.sandbox.v1.ControllerStatusResponse.exited_at:type_name -> google.protobuf.Timestamp + 28, // 15: containerd.services.sandbox.v1.ControllerStatusResponse.extra:type_name -> google.protobuf.Any + 0, // 16: containerd.services.sandbox.v1.Store.Create:input_type -> containerd.services.sandbox.v1.StoreCreateRequest + 2, // 17: containerd.services.sandbox.v1.Store.Update:input_type -> containerd.services.sandbox.v1.StoreUpdateRequest + 4, // 18: containerd.services.sandbox.v1.Store.Delete:input_type -> containerd.services.sandbox.v1.StoreDeleteRequest + 6, // 19: containerd.services.sandbox.v1.Store.List:input_type -> containerd.services.sandbox.v1.StoreListRequest + 8, // 20: containerd.services.sandbox.v1.Store.Get:input_type -> containerd.services.sandbox.v1.StoreGetRequest + 10, // 21: containerd.services.sandbox.v1.Controller.Create:input_type -> containerd.services.sandbox.v1.ControllerCreateRequest + 12, // 22: containerd.services.sandbox.v1.Controller.Start:input_type -> containerd.services.sandbox.v1.ControllerStartRequest + 14, // 23: containerd.services.sandbox.v1.Controller.Platform:input_type -> containerd.services.sandbox.v1.ControllerPlatformRequest + 16, // 24: containerd.services.sandbox.v1.Controller.Stop:input_type -> containerd.services.sandbox.v1.ControllerStopRequest + 18, // 25: containerd.services.sandbox.v1.Controller.Wait:input_type -> containerd.services.sandbox.v1.ControllerWaitRequest + 20, // 26: containerd.services.sandbox.v1.Controller.Status:input_type -> containerd.services.sandbox.v1.ControllerStatusRequest + 22, // 27: containerd.services.sandbox.v1.Controller.Shutdown:input_type -> containerd.services.sandbox.v1.ControllerShutdownRequest + 1, // 28: containerd.services.sandbox.v1.Store.Create:output_type -> containerd.services.sandbox.v1.StoreCreateResponse + 3, // 29: containerd.services.sandbox.v1.Store.Update:output_type -> containerd.services.sandbox.v1.StoreUpdateResponse + 5, // 30: containerd.services.sandbox.v1.Store.Delete:output_type -> containerd.services.sandbox.v1.StoreDeleteResponse + 7, // 31: containerd.services.sandbox.v1.Store.List:output_type -> containerd.services.sandbox.v1.StoreListResponse + 9, // 32: containerd.services.sandbox.v1.Store.Get:output_type -> containerd.services.sandbox.v1.StoreGetResponse + 11, // 33: containerd.services.sandbox.v1.Controller.Create:output_type -> containerd.services.sandbox.v1.ControllerCreateResponse + 13, // 34: containerd.services.sandbox.v1.Controller.Start:output_type -> containerd.services.sandbox.v1.ControllerStartResponse + 15, // 35: containerd.services.sandbox.v1.Controller.Platform:output_type -> containerd.services.sandbox.v1.ControllerPlatformResponse + 17, // 36: containerd.services.sandbox.v1.Controller.Stop:output_type -> containerd.services.sandbox.v1.ControllerStopResponse + 19, // 37: containerd.services.sandbox.v1.Controller.Wait:output_type -> containerd.services.sandbox.v1.ControllerWaitResponse + 21, // 38: containerd.services.sandbox.v1.Controller.Status:output_type -> containerd.services.sandbox.v1.ControllerStatusResponse + 23, // 39: containerd.services.sandbox.v1.Controller.Shutdown:output_type -> 
containerd.services.sandbox.v1.ControllerShutdownResponse + 28, // [28:40] is the sub-list for method output_type + 16, // [16:28] is the sub-list for method input_type + 16, // [16:16] is the sub-list for extension type_name + 16, // [16:16] is the sub-list for extension extendee + 0, // [0:16] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_init() } +func file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_init() { + if File_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StoreCreateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StoreCreateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StoreUpdateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StoreUpdateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StoreDeleteRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StoreDeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StoreListRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StoreListResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StoreGetRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StoreGetResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerCreateRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerCreateResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerStartRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerStartResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerPlatformRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerPlatformResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerStopRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerStopResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerWaitRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerWaitResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerStatusRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerStatusResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerShutdownRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ControllerShutdownResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDesc, + NumEnums: 0, + NumMessages: 26, + NumExtensions: 0, + NumServices: 2, + }, + GoTypes: file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto = out.File + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_rawDesc = nil + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_goTypes = nil + file_github_com_containerd_containerd_api_services_sandbox_v1_sandbox_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox.proto b/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox.proto new file mode 100644 index 0000000000000..d234c72e49dc0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox.proto @@ -0,0 +1,162 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +syntax = "proto3"; + +// Sandbox is a v2 runtime extension that allows more complex execution environments for containers. +// This adds a notion of groups of containers that share same lifecycle and/or resources. 
+// A few good fits for sandbox can be:
+// - A "pause" container in k8s, that acts as a parent process for child containers to hold network namespace.
+// - (micro)VMs that launch a VM process and executes containers inside guest OS.
+// containerd in this case remains implementation agnostic and delegates sandbox handling to runtimes.
+// See proposal and discussion here: https://github.com/containerd/containerd/issues/4131
+package containerd.services.sandbox.v1;
+
+import "google/protobuf/any.proto";
+import "google/protobuf/timestamp.proto";
+
+import "github.com/containerd/containerd/api/types/sandbox.proto";
+import "github.com/containerd/containerd/api/types/mount.proto";
+import "github.com/containerd/containerd/api/types/platform.proto";
+
+option go_package = "github.com/containerd/containerd/api/services/sandbox/v1;sandbox";
+
+// Store provides a metadata storage interface for sandboxes. Similarly to `Containers`,
+// sandbox object includes info required to start a new instance, but no runtime state.
+// When running a new sandbox instance, store objects are used as base type to create from.
+service Store {
+	rpc Create(StoreCreateRequest) returns (StoreCreateResponse);
+	rpc Update(StoreUpdateRequest) returns (StoreUpdateResponse);
+	rpc Delete(StoreDeleteRequest) returns (StoreDeleteResponse);
+	rpc List(StoreListRequest) returns (StoreListResponse);
+	rpc Get(StoreGetRequest) returns (StoreGetResponse);
+}
+
+message StoreCreateRequest {
+	containerd.types.Sandbox sandbox = 1;
+}
+
+message StoreCreateResponse {
+	containerd.types.Sandbox sandbox = 1;
+}
+
+message StoreUpdateRequest {
+	containerd.types.Sandbox sandbox = 1;
+	repeated string fields = 2;
+}
+
+message StoreUpdateResponse {
+	containerd.types.Sandbox sandbox = 1;
+}
+
+message StoreDeleteRequest {
+	string sandbox_id = 1;
+}
+
+message StoreDeleteResponse {}
+
+message StoreListRequest {
+	repeated string filters = 1;
+}
+
+message StoreListResponse {
+	repeated containerd.types.Sandbox list = 1;
+}
+
+message StoreGetRequest {
+	string sandbox_id = 1;
+}
+
+message StoreGetResponse {
+	containerd.types.Sandbox sandbox = 1;
+}
+
+// Controller is an interface to manage runtime sandbox instances.
+service Controller {
+	rpc Create(ControllerCreateRequest) returns (ControllerCreateResponse);
+	rpc Start(ControllerStartRequest) returns (ControllerStartResponse);
+	rpc Platform(ControllerPlatformRequest) returns (ControllerPlatformResponse);
+	rpc Stop(ControllerStopRequest) returns (ControllerStopResponse);
+	rpc Wait(ControllerWaitRequest) returns (ControllerWaitResponse);
+	rpc Status(ControllerStatusRequest) returns (ControllerStatusResponse);
+	rpc Shutdown(ControllerShutdownRequest) returns (ControllerShutdownResponse);
+}
+
+message ControllerCreateRequest {
+	string sandbox_id = 1;
+	repeated containerd.types.Mount rootfs = 2;
+	google.protobuf.Any options = 3;
+}
+
+message ControllerCreateResponse {
+	string sandbox_id = 1;
+}
+
+message ControllerStartRequest {
+	string sandbox_id = 1;
+}
+
+message ControllerStartResponse {
+	string sandbox_id = 1;
+	uint32 pid = 2;
+	google.protobuf.Timestamp created_at = 3;
+	map<string, string> labels = 4;
+}
+
+message ControllerPlatformRequest {
+	string sandbox_id = 1;
+}
+
+message ControllerPlatformResponse {
+	containerd.types.Platform platform = 1;
+}
+
+message ControllerStopRequest {
+	string sandbox_id = 1;
+	uint32 timeout_secs = 2;
+}
+
+message ControllerStopResponse {}
+
+message ControllerWaitRequest {
+	string sandbox_id = 1;
+}
+
+message ControllerWaitResponse {
+	uint32 exit_status = 1;
+	google.protobuf.Timestamp exited_at = 2;
+}
+
+message ControllerStatusRequest {
+	string sandbox_id = 1;
+	bool verbose = 2;
+}
+
+message ControllerStatusResponse {
+	string sandbox_id = 1;
+	uint32 pid = 2;
+	string state = 3;
+	map<string, string> info = 4;
+	google.protobuf.Timestamp created_at = 5;
+	google.protobuf.Timestamp exited_at = 6;
+	google.protobuf.Any extra = 7;
+}
+
+message ControllerShutdownRequest {
+	string sandbox_id = 1;
+}
+
+message ControllerShutdownResponse {}
diff --git a/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox_grpc.pb.go
new file mode 100644
index 0000000000000..16d746d75cf72
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/sandbox/v1/sandbox_grpc.pb.go
@@ -0,0 +1,551 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.2.0
+// - protoc v3.20.1
+// source: github.com/containerd/containerd/api/services/sandbox/v1/sandbox.proto
+
+package sandbox
+
+import (
+	context "context"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+// StoreClient is the client API for Store service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type StoreClient interface { + Create(ctx context.Context, in *StoreCreateRequest, opts ...grpc.CallOption) (*StoreCreateResponse, error) + Update(ctx context.Context, in *StoreUpdateRequest, opts ...grpc.CallOption) (*StoreUpdateResponse, error) + Delete(ctx context.Context, in *StoreDeleteRequest, opts ...grpc.CallOption) (*StoreDeleteResponse, error) + List(ctx context.Context, in *StoreListRequest, opts ...grpc.CallOption) (*StoreListResponse, error) + Get(ctx context.Context, in *StoreGetRequest, opts ...grpc.CallOption) (*StoreGetResponse, error) +} + +type storeClient struct { + cc grpc.ClientConnInterface +} + +func NewStoreClient(cc grpc.ClientConnInterface) StoreClient { + return &storeClient{cc} +} + +func (c *storeClient) Create(ctx context.Context, in *StoreCreateRequest, opts ...grpc.CallOption) (*StoreCreateResponse, error) { + out := new(StoreCreateResponse) + err := c.cc.Invoke(ctx, "/containerd.services.sandbox.v1.Store/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storeClient) Update(ctx context.Context, in *StoreUpdateRequest, opts ...grpc.CallOption) (*StoreUpdateResponse, error) { + out := new(StoreUpdateResponse) + err := c.cc.Invoke(ctx, "/containerd.services.sandbox.v1.Store/Update", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storeClient) Delete(ctx context.Context, in *StoreDeleteRequest, opts ...grpc.CallOption) (*StoreDeleteResponse, error) { + out := new(StoreDeleteResponse) + err := c.cc.Invoke(ctx, "/containerd.services.sandbox.v1.Store/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storeClient) List(ctx context.Context, in *StoreListRequest, opts ...grpc.CallOption) (*StoreListResponse, error) { + out := new(StoreListResponse) + err := c.cc.Invoke(ctx, "/containerd.services.sandbox.v1.Store/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *storeClient) Get(ctx context.Context, in *StoreGetRequest, opts ...grpc.CallOption) (*StoreGetResponse, error) { + out := new(StoreGetResponse) + err := c.cc.Invoke(ctx, "/containerd.services.sandbox.v1.Store/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// StoreServer is the server API for Store service. +// All implementations must embed UnimplementedStoreServer +// for forward compatibility +type StoreServer interface { + Create(context.Context, *StoreCreateRequest) (*StoreCreateResponse, error) + Update(context.Context, *StoreUpdateRequest) (*StoreUpdateResponse, error) + Delete(context.Context, *StoreDeleteRequest) (*StoreDeleteResponse, error) + List(context.Context, *StoreListRequest) (*StoreListResponse, error) + Get(context.Context, *StoreGetRequest) (*StoreGetResponse, error) + mustEmbedUnimplementedStoreServer() +} + +// UnimplementedStoreServer must be embedded to have forward compatible implementations. 
+type UnimplementedStoreServer struct { +} + +func (UnimplementedStoreServer) Create(context.Context, *StoreCreateRequest) (*StoreCreateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") +} +func (UnimplementedStoreServer) Update(context.Context, *StoreUpdateRequest) (*StoreUpdateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") +} +func (UnimplementedStoreServer) Delete(context.Context, *StoreDeleteRequest) (*StoreDeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (UnimplementedStoreServer) List(context.Context, *StoreListRequest) (*StoreListResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method List not implemented") +} +func (UnimplementedStoreServer) Get(context.Context, *StoreGetRequest) (*StoreGetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (UnimplementedStoreServer) mustEmbedUnimplementedStoreServer() {} + +// UnsafeStoreServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to StoreServer will +// result in compilation errors. +type UnsafeStoreServer interface { + mustEmbedUnimplementedStoreServer() +} + +func RegisterStoreServer(s grpc.ServiceRegistrar, srv StoreServer) { + s.RegisterService(&Store_ServiceDesc, srv) +} + +func _Store_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StoreCreateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StoreServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.sandbox.v1.Store/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StoreServer).Create(ctx, req.(*StoreCreateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Store_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StoreUpdateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StoreServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.sandbox.v1.Store/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StoreServer).Update(ctx, req.(*StoreUpdateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Store_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StoreDeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StoreServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.sandbox.v1.Store/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StoreServer).Delete(ctx, req.(*StoreDeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Store_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(StoreListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StoreServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.sandbox.v1.Store/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StoreServer).List(ctx, req.(*StoreListRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Store_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StoreGetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(StoreServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.sandbox.v1.Store/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(StoreServer).Get(ctx, req.(*StoreGetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Store_ServiceDesc is the grpc.ServiceDesc for Store service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Store_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.sandbox.v1.Store", + HandlerType: (*StoreServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Create", + Handler: _Store_Create_Handler, + }, + { + MethodName: "Update", + Handler: _Store_Update_Handler, + }, + { + MethodName: "Delete", + Handler: _Store_Delete_Handler, + }, + { + MethodName: "List", + Handler: _Store_List_Handler, + }, + { + MethodName: "Get", + Handler: _Store_Get_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/containerd/containerd/api/services/sandbox/v1/sandbox.proto", +} + +// ControllerClient is the client API for Controller service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ControllerClient interface { + Create(ctx context.Context, in *ControllerCreateRequest, opts ...grpc.CallOption) (*ControllerCreateResponse, error) + Start(ctx context.Context, in *ControllerStartRequest, opts ...grpc.CallOption) (*ControllerStartResponse, error) + Platform(ctx context.Context, in *ControllerPlatformRequest, opts ...grpc.CallOption) (*ControllerPlatformResponse, error) + Stop(ctx context.Context, in *ControllerStopRequest, opts ...grpc.CallOption) (*ControllerStopResponse, error) + Wait(ctx context.Context, in *ControllerWaitRequest, opts ...grpc.CallOption) (*ControllerWaitResponse, error) + Status(ctx context.Context, in *ControllerStatusRequest, opts ...grpc.CallOption) (*ControllerStatusResponse, error) + Shutdown(ctx context.Context, in *ControllerShutdownRequest, opts ...grpc.CallOption) (*ControllerShutdownResponse, error) +} + +type controllerClient struct { + cc grpc.ClientConnInterface +} + +func NewControllerClient(cc grpc.ClientConnInterface) ControllerClient { + return &controllerClient{cc} +} + +func (c *controllerClient) Create(ctx context.Context, in *ControllerCreateRequest, opts ...grpc.CallOption) (*ControllerCreateResponse, error) { + out := new(ControllerCreateResponse) + err := c.cc.Invoke(ctx, "/containerd.services.sandbox.v1.Controller/Create", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) Start(ctx context.Context, in *ControllerStartRequest, opts ...grpc.CallOption) (*ControllerStartResponse, error) { + out := new(ControllerStartResponse) + err := c.cc.Invoke(ctx, "/containerd.services.sandbox.v1.Controller/Start", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) Platform(ctx context.Context, in *ControllerPlatformRequest, opts ...grpc.CallOption) (*ControllerPlatformResponse, error) { + out := new(ControllerPlatformResponse) + err := c.cc.Invoke(ctx, "/containerd.services.sandbox.v1.Controller/Platform", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) Stop(ctx context.Context, in *ControllerStopRequest, opts ...grpc.CallOption) (*ControllerStopResponse, error) { + out := new(ControllerStopResponse) + err := c.cc.Invoke(ctx, "/containerd.services.sandbox.v1.Controller/Stop", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) Wait(ctx context.Context, in *ControllerWaitRequest, opts ...grpc.CallOption) (*ControllerWaitResponse, error) { + out := new(ControllerWaitResponse) + err := c.cc.Invoke(ctx, "/containerd.services.sandbox.v1.Controller/Wait", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) Status(ctx context.Context, in *ControllerStatusRequest, opts ...grpc.CallOption) (*ControllerStatusResponse, error) { + out := new(ControllerStatusResponse) + err := c.cc.Invoke(ctx, "/containerd.services.sandbox.v1.Controller/Status", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controllerClient) Shutdown(ctx context.Context, in *ControllerShutdownRequest, opts ...grpc.CallOption) (*ControllerShutdownResponse, error) { + out := new(ControllerShutdownResponse) + err := c.cc.Invoke(ctx, "/containerd.services.sandbox.v1.Controller/Shutdown", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ControllerServer is the server API for Controller service. +// All implementations must embed UnimplementedControllerServer +// for forward compatibility +type ControllerServer interface { + Create(context.Context, *ControllerCreateRequest) (*ControllerCreateResponse, error) + Start(context.Context, *ControllerStartRequest) (*ControllerStartResponse, error) + Platform(context.Context, *ControllerPlatformRequest) (*ControllerPlatformResponse, error) + Stop(context.Context, *ControllerStopRequest) (*ControllerStopResponse, error) + Wait(context.Context, *ControllerWaitRequest) (*ControllerWaitResponse, error) + Status(context.Context, *ControllerStatusRequest) (*ControllerStatusResponse, error) + Shutdown(context.Context, *ControllerShutdownRequest) (*ControllerShutdownResponse, error) + mustEmbedUnimplementedControllerServer() +} + +// UnimplementedControllerServer must be embedded to have forward compatible implementations. 
+type UnimplementedControllerServer struct { +} + +func (UnimplementedControllerServer) Create(context.Context, *ControllerCreateRequest) (*ControllerCreateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") +} +func (UnimplementedControllerServer) Start(context.Context, *ControllerStartRequest) (*ControllerStartResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Start not implemented") +} +func (UnimplementedControllerServer) Platform(context.Context, *ControllerPlatformRequest) (*ControllerPlatformResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Platform not implemented") +} +func (UnimplementedControllerServer) Stop(context.Context, *ControllerStopRequest) (*ControllerStopResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Stop not implemented") +} +func (UnimplementedControllerServer) Wait(context.Context, *ControllerWaitRequest) (*ControllerWaitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Wait not implemented") +} +func (UnimplementedControllerServer) Status(context.Context, *ControllerStatusRequest) (*ControllerStatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") +} +func (UnimplementedControllerServer) Shutdown(context.Context, *ControllerShutdownRequest) (*ControllerShutdownResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Shutdown not implemented") +} +func (UnimplementedControllerServer) mustEmbedUnimplementedControllerServer() {} + +// UnsafeControllerServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ControllerServer will +// result in compilation errors. 
+type UnsafeControllerServer interface { + mustEmbedUnimplementedControllerServer() +} + +func RegisterControllerServer(s grpc.ServiceRegistrar, srv ControllerServer) { + s.RegisterService(&Controller_ServiceDesc, srv) +} + +func _Controller_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerCreateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.sandbox.v1.Controller/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).Create(ctx, req.(*ControllerCreateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_Start_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerStartRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).Start(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.sandbox.v1.Controller/Start", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).Start(ctx, req.(*ControllerStartRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_Platform_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerPlatformRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).Platform(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.sandbox.v1.Controller/Platform", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).Platform(ctx, req.(*ControllerPlatformRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerStopRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).Stop(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.sandbox.v1.Controller/Stop", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).Stop(ctx, req.(*ControllerStopRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_Wait_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerWaitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).Wait(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.sandbox.v1.Controller/Wait", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).Wait(ctx, req.(*ControllerWaitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_Controller_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).Status(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.sandbox.v1.Controller/Status", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).Status(ctx, req.(*ControllerStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Controller_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ControllerShutdownRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControllerServer).Shutdown(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.sandbox.v1.Controller/Shutdown", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControllerServer).Shutdown(ctx, req.(*ControllerShutdownRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Controller_ServiceDesc is the grpc.ServiceDesc for Controller service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Controller_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.sandbox.v1.Controller", + HandlerType: (*ControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Create", + Handler: _Controller_Create_Handler, + }, + { + MethodName: "Start", + Handler: _Controller_Start_Handler, + }, + { + MethodName: "Platform", + Handler: _Controller_Platform_Handler, + }, + { + MethodName: "Stop", + Handler: _Controller_Stop_Handler, + }, + { + MethodName: "Wait", + Handler: _Controller_Wait_Handler, + }, + { + MethodName: "Status", + Handler: _Controller_Status_Handler, + }, + { + MethodName: "Shutdown", + Handler: _Controller_Shutdown_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/containerd/containerd/api/services/sandbox/v1/sandbox.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.pb.go b/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.pb.go index 046c97b015fee..b7cec8048bb3a 100644 --- a/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.pb.go @@ -1,617 +1,884 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto package snapshots import ( - context "context" - fmt "fmt" types "github.com/containerd/containerd/api/types" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types1 "github.com/gogo/protobuf/types" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - strings "strings" - time "time" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type Kind int32 const ( - KindUnknown Kind = 0 - KindView Kind = 1 - KindActive Kind = 2 - KindCommitted Kind = 3 + Kind_UNKNOWN Kind = 0 + Kind_VIEW Kind = 1 + Kind_ACTIVE Kind = 2 + Kind_COMMITTED Kind = 3 ) -var Kind_name = map[int32]string{ - 0: "UNKNOWN", - 1: "VIEW", - 2: "ACTIVE", - 3: "COMMITTED", -} +// Enum value maps for Kind. +var ( + Kind_name = map[int32]string{ + 0: "UNKNOWN", + 1: "VIEW", + 2: "ACTIVE", + 3: "COMMITTED", + } + Kind_value = map[string]int32{ + "UNKNOWN": 0, + "VIEW": 1, + "ACTIVE": 2, + "COMMITTED": 3, + } +) -var Kind_value = map[string]int32{ - "UNKNOWN": 0, - "VIEW": 1, - "ACTIVE": 2, - "COMMITTED": 3, +func (x Kind) Enum() *Kind { + p := new(Kind) + *p = x + return p } func (x Kind) String() string { - return proto.EnumName(Kind_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Kind) Descriptor() protoreflect.EnumDescriptor { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_enumTypes[0].Descriptor() +} + +func (Kind) Type() protoreflect.EnumType { + return &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_enumTypes[0] +} + +func (x Kind) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) } +// Deprecated: Use Kind.Descriptor instead. 
func (Kind) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{0} + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{0} } type PrepareSnapshotRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` Parent string `protobuf:"bytes,3,opt,name=parent,proto3" json:"parent,omitempty"` // Labels are arbitrary data on snapshots. // // The combined size of a key/value pair cannot exceed 4096 bytes. - Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *PrepareSnapshotRequest) Reset() { + *x = PrepareSnapshotRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PrepareSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *PrepareSnapshotRequest) Reset() { *m = PrepareSnapshotRequest{} } func (*PrepareSnapshotRequest) ProtoMessage() {} -func (*PrepareSnapshotRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{0} -} -func (m *PrepareSnapshotRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PrepareSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PrepareSnapshotRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *PrepareSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) +} + +// Deprecated: Use PrepareSnapshotRequest.ProtoReflect.Descriptor instead. 
+func (*PrepareSnapshotRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{0} } -func (m *PrepareSnapshotRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrepareSnapshotRequest.Merge(m, src) + +func (x *PrepareSnapshotRequest) GetSnapshotter() string { + if x != nil { + return x.Snapshotter + } + return "" } -func (m *PrepareSnapshotRequest) XXX_Size() int { - return m.Size() + +func (x *PrepareSnapshotRequest) GetKey() string { + if x != nil { + return x.Key + } + return "" } -func (m *PrepareSnapshotRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PrepareSnapshotRequest.DiscardUnknown(m) + +func (x *PrepareSnapshotRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" } -var xxx_messageInfo_PrepareSnapshotRequest proto.InternalMessageInfo +func (x *PrepareSnapshotRequest) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} type PrepareSnapshotResponse struct { - Mounts []*types.Mount `protobuf:"bytes,1,rep,name=mounts,proto3" json:"mounts,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Mounts []*types.Mount `protobuf:"bytes,1,rep,name=mounts,proto3" json:"mounts,omitempty"` } -func (m *PrepareSnapshotResponse) Reset() { *m = PrepareSnapshotResponse{} } -func (*PrepareSnapshotResponse) ProtoMessage() {} -func (*PrepareSnapshotResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{1} -} -func (m *PrepareSnapshotResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PrepareSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PrepareSnapshotResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *PrepareSnapshotResponse) Reset() { + *x = PrepareSnapshotResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *PrepareSnapshotResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_PrepareSnapshotResponse.Merge(m, src) + +func (x *PrepareSnapshotResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *PrepareSnapshotResponse) XXX_Size() int { - return m.Size() + +func (*PrepareSnapshotResponse) ProtoMessage() {} + +func (x *PrepareSnapshotResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *PrepareSnapshotResponse) XXX_DiscardUnknown() { - xxx_messageInfo_PrepareSnapshotResponse.DiscardUnknown(m) + +// Deprecated: Use PrepareSnapshotResponse.ProtoReflect.Descriptor instead. 
+func (*PrepareSnapshotResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{1} } -var xxx_messageInfo_PrepareSnapshotResponse proto.InternalMessageInfo +func (x *PrepareSnapshotResponse) GetMounts() []*types.Mount { + if x != nil { + return x.Mounts + } + return nil +} type ViewSnapshotRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` Parent string `protobuf:"bytes,3,opt,name=parent,proto3" json:"parent,omitempty"` // Labels are arbitrary data on snapshots. // // The combined size of a key/value pair cannot exceed 4096 bytes. - Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ViewSnapshotRequest) Reset() { + *x = ViewSnapshotRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ViewSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ViewSnapshotRequest) Reset() { *m = ViewSnapshotRequest{} } func (*ViewSnapshotRequest) ProtoMessage() {} -func (*ViewSnapshotRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{2} -} -func (m *ViewSnapshotRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ViewSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ViewSnapshotRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *ViewSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *ViewSnapshotRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ViewSnapshotRequest.Merge(m, src) + +// Deprecated: Use ViewSnapshotRequest.ProtoReflect.Descriptor instead. 
+func (*ViewSnapshotRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{2} +} + +func (x *ViewSnapshotRequest) GetSnapshotter() string { + if x != nil { + return x.Snapshotter + } + return "" } -func (m *ViewSnapshotRequest) XXX_Size() int { - return m.Size() + +func (x *ViewSnapshotRequest) GetKey() string { + if x != nil { + return x.Key + } + return "" } -func (m *ViewSnapshotRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ViewSnapshotRequest.DiscardUnknown(m) + +func (x *ViewSnapshotRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" } -var xxx_messageInfo_ViewSnapshotRequest proto.InternalMessageInfo +func (x *ViewSnapshotRequest) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} type ViewSnapshotResponse struct { - Mounts []*types.Mount `protobuf:"bytes,1,rep,name=mounts,proto3" json:"mounts,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Mounts []*types.Mount `protobuf:"bytes,1,rep,name=mounts,proto3" json:"mounts,omitempty"` +} + +func (x *ViewSnapshotResponse) Reset() { + *x = ViewSnapshotResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ViewSnapshotResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ViewSnapshotResponse) Reset() { *m = ViewSnapshotResponse{} } func (*ViewSnapshotResponse) ProtoMessage() {} -func (*ViewSnapshotResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{3} -} -func (m *ViewSnapshotResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ViewSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ViewSnapshotResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *ViewSnapshotResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *ViewSnapshotResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ViewSnapshotResponse.Merge(m, src) + +// Deprecated: Use ViewSnapshotResponse.ProtoReflect.Descriptor instead. 
+func (*ViewSnapshotResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{3} } -func (m *ViewSnapshotResponse) XXX_Size() int { - return m.Size() + +func (x *ViewSnapshotResponse) GetMounts() []*types.Mount { + if x != nil { + return x.Mounts + } + return nil } -func (m *ViewSnapshotResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ViewSnapshotResponse.DiscardUnknown(m) + +type MountsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` + Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` } -var xxx_messageInfo_ViewSnapshotResponse proto.InternalMessageInfo +func (x *MountsRequest) Reset() { + *x = MountsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type MountsRequest struct { - Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` - Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *MountsRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *MountsRequest) Reset() { *m = MountsRequest{} } func (*MountsRequest) ProtoMessage() {} -func (*MountsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{4} -} -func (m *MountsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MountsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MountsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *MountsRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *MountsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_MountsRequest.Merge(m, src) -} -func (m *MountsRequest) XXX_Size() int { - return m.Size() + +// Deprecated: Use MountsRequest.ProtoReflect.Descriptor instead. 
+func (*MountsRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{4} } -func (m *MountsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_MountsRequest.DiscardUnknown(m) + +func (x *MountsRequest) GetSnapshotter() string { + if x != nil { + return x.Snapshotter + } + return "" } -var xxx_messageInfo_MountsRequest proto.InternalMessageInfo +func (x *MountsRequest) GetKey() string { + if x != nil { + return x.Key + } + return "" +} type MountsResponse struct { - Mounts []*types.Mount `protobuf:"bytes,1,rep,name=mounts,proto3" json:"mounts,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Mounts []*types.Mount `protobuf:"bytes,1,rep,name=mounts,proto3" json:"mounts,omitempty"` +} + +func (x *MountsResponse) Reset() { + *x = MountsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MountsResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *MountsResponse) Reset() { *m = MountsResponse{} } func (*MountsResponse) ProtoMessage() {} -func (*MountsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{5} -} -func (m *MountsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MountsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MountsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *MountsResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *MountsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MountsResponse.Merge(m, src) + +// Deprecated: Use MountsResponse.ProtoReflect.Descriptor instead. 
+func (*MountsResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{5} } -func (m *MountsResponse) XXX_Size() int { - return m.Size() + +func (x *MountsResponse) GetMounts() []*types.Mount { + if x != nil { + return x.Mounts + } + return nil } -func (m *MountsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MountsResponse.DiscardUnknown(m) + +type RemoveSnapshotRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` + Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` } -var xxx_messageInfo_MountsResponse proto.InternalMessageInfo +func (x *RemoveSnapshotRequest) Reset() { + *x = RemoveSnapshotRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type RemoveSnapshotRequest struct { - Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` - Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *RemoveSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *RemoveSnapshotRequest) Reset() { *m = RemoveSnapshotRequest{} } func (*RemoveSnapshotRequest) ProtoMessage() {} -func (*RemoveSnapshotRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{6} -} -func (m *RemoveSnapshotRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RemoveSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RemoveSnapshotRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *RemoveSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *RemoveSnapshotRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_RemoveSnapshotRequest.Merge(m, src) -} -func (m *RemoveSnapshotRequest) XXX_Size() int { - return m.Size() + +// Deprecated: Use RemoveSnapshotRequest.ProtoReflect.Descriptor instead. 
+func (*RemoveSnapshotRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{6} } -func (m *RemoveSnapshotRequest) XXX_DiscardUnknown() { - xxx_messageInfo_RemoveSnapshotRequest.DiscardUnknown(m) + +func (x *RemoveSnapshotRequest) GetSnapshotter() string { + if x != nil { + return x.Snapshotter + } + return "" } -var xxx_messageInfo_RemoveSnapshotRequest proto.InternalMessageInfo +func (x *RemoveSnapshotRequest) GetKey() string { + if x != nil { + return x.Key + } + return "" +} type CommitSnapshotRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` Key string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` // Labels are arbitrary data on snapshots. // // The combined size of a key/value pair cannot exceed 4096 bytes. - Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *CommitSnapshotRequest) Reset() { + *x = CommitSnapshotRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CommitSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *CommitSnapshotRequest) Reset() { *m = CommitSnapshotRequest{} } func (*CommitSnapshotRequest) ProtoMessage() {} -func (*CommitSnapshotRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{7} -} -func (m *CommitSnapshotRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CommitSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CommitSnapshotRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *CommitSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *CommitSnapshotRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CommitSnapshotRequest.Merge(m, src) + +// Deprecated: Use CommitSnapshotRequest.ProtoReflect.Descriptor instead. 
+func (*CommitSnapshotRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{7} +} + +func (x *CommitSnapshotRequest) GetSnapshotter() string { + if x != nil { + return x.Snapshotter + } + return "" } -func (m *CommitSnapshotRequest) XXX_Size() int { - return m.Size() + +func (x *CommitSnapshotRequest) GetName() string { + if x != nil { + return x.Name + } + return "" } -func (m *CommitSnapshotRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CommitSnapshotRequest.DiscardUnknown(m) + +func (x *CommitSnapshotRequest) GetKey() string { + if x != nil { + return x.Key + } + return "" } -var xxx_messageInfo_CommitSnapshotRequest proto.InternalMessageInfo +func (x *CommitSnapshotRequest) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} type StatSnapshotRequest struct { - Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` - Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` + Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` +} + +func (x *StatSnapshotRequest) Reset() { + *x = StatSnapshotRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StatSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *StatSnapshotRequest) Reset() { *m = StatSnapshotRequest{} } func (*StatSnapshotRequest) ProtoMessage() {} -func (*StatSnapshotRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{8} -} -func (m *StatSnapshotRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StatSnapshotRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *StatSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *StatSnapshotRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatSnapshotRequest.Merge(m, src) -} -func (m *StatSnapshotRequest) XXX_Size() int { - return m.Size() + +// Deprecated: Use StatSnapshotRequest.ProtoReflect.Descriptor instead. 
+func (*StatSnapshotRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{8} } -func (m *StatSnapshotRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StatSnapshotRequest.DiscardUnknown(m) + +func (x *StatSnapshotRequest) GetSnapshotter() string { + if x != nil { + return x.Snapshotter + } + return "" } -var xxx_messageInfo_StatSnapshotRequest proto.InternalMessageInfo +func (x *StatSnapshotRequest) GetKey() string { + if x != nil { + return x.Key + } + return "" +} type Info struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Parent string `protobuf:"bytes,2,opt,name=parent,proto3" json:"parent,omitempty"` Kind Kind `protobuf:"varint,3,opt,name=kind,proto3,enum=containerd.services.snapshots.v1.Kind" json:"kind,omitempty"` // CreatedAt provides the time at which the snapshot was created. - CreatedAt time.Time `protobuf:"bytes,4,opt,name=created_at,json=createdAt,proto3,stdtime" json:"created_at"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` // UpdatedAt provides the time the info was last updated. - UpdatedAt time.Time `protobuf:"bytes,5,opt,name=updated_at,json=updatedAt,proto3,stdtime" json:"updated_at"` + UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` // Labels are arbitrary data on snapshots. // // The combined size of a key/value pair cannot exceed 4096 bytes. - Labels map[string]string `protobuf:"bytes,6,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Labels map[string]string `protobuf:"bytes,6,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Info) Reset() { + *x = Info{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Info) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Info) Reset() { *m = Info{} } func (*Info) ProtoMessage() {} -func (*Info) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{9} -} -func (m *Info) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Info) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Info.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *Info) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Info.ProtoReflect.Descriptor instead. 
+func (*Info) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{9} +} + +func (x *Info) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Info) GetParent() string { + if x != nil { + return x.Parent } + return "" } -func (m *Info) XXX_Merge(src proto.Message) { - xxx_messageInfo_Info.Merge(m, src) + +func (x *Info) GetKind() Kind { + if x != nil { + return x.Kind + } + return Kind_UNKNOWN } -func (m *Info) XXX_Size() int { - return m.Size() + +func (x *Info) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil } -func (m *Info) XXX_DiscardUnknown() { - xxx_messageInfo_Info.DiscardUnknown(m) + +func (x *Info) GetUpdatedAt() *timestamppb.Timestamp { + if x != nil { + return x.UpdatedAt + } + return nil } -var xxx_messageInfo_Info proto.InternalMessageInfo +func (x *Info) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} type StatSnapshotResponse struct { - Info Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Info *Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` } -func (m *StatSnapshotResponse) Reset() { *m = StatSnapshotResponse{} } -func (*StatSnapshotResponse) ProtoMessage() {} -func (*StatSnapshotResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{10} -} -func (m *StatSnapshotResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StatSnapshotResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *StatSnapshotResponse) Reset() { + *x = StatSnapshotResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *StatSnapshotResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatSnapshotResponse.Merge(m, src) + +func (x *StatSnapshotResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *StatSnapshotResponse) XXX_Size() int { - return m.Size() + +func (*StatSnapshotResponse) ProtoMessage() {} + +func (x *StatSnapshotResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *StatSnapshotResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StatSnapshotResponse.DiscardUnknown(m) + +// Deprecated: Use StatSnapshotResponse.ProtoReflect.Descriptor instead. 
+func (*StatSnapshotResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{10} } -var xxx_messageInfo_StatSnapshotResponse proto.InternalMessageInfo +func (x *StatSnapshotResponse) GetInfo() *Info { + if x != nil { + return x.Info + } + return nil +} type UpdateSnapshotRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` - Info Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info"` + Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` // UpdateMask specifies which fields to perform the update on. If empty, // the operation applies to all fields. // // In info, Name, Parent, Kind, Created are immutable, // other field may be updated using this mask. // If no mask is provided, all mutable field are updated. - UpdateMask *types1.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateSnapshotRequest) Reset() { + *x = UpdateSnapshotRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *UpdateSnapshotRequest) Reset() { *m = UpdateSnapshotRequest{} } func (*UpdateSnapshotRequest) ProtoMessage() {} -func (*UpdateSnapshotRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{11} -} -func (m *UpdateSnapshotRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UpdateSnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UpdateSnapshotRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *UpdateSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *UpdateSnapshotRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateSnapshotRequest.Merge(m, src) + +// Deprecated: Use UpdateSnapshotRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateSnapshotRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{11} } -func (m *UpdateSnapshotRequest) XXX_Size() int { - return m.Size() + +func (x *UpdateSnapshotRequest) GetSnapshotter() string { + if x != nil { + return x.Snapshotter + } + return "" } -func (m *UpdateSnapshotRequest) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateSnapshotRequest.DiscardUnknown(m) + +func (x *UpdateSnapshotRequest) GetInfo() *Info { + if x != nil { + return x.Info + } + return nil } -var xxx_messageInfo_UpdateSnapshotRequest proto.InternalMessageInfo +func (x *UpdateSnapshotRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} type UpdateSnapshotResponse struct { - Info Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Info *Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` } -func (m *UpdateSnapshotResponse) Reset() { *m = UpdateSnapshotResponse{} } -func (*UpdateSnapshotResponse) ProtoMessage() {} -func (*UpdateSnapshotResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{12} -} -func (m *UpdateSnapshotResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UpdateSnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UpdateSnapshotResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *UpdateSnapshotResponse) Reset() { + *x = UpdateSnapshotResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *UpdateSnapshotResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateSnapshotResponse.Merge(m, src) + +func (x *UpdateSnapshotResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *UpdateSnapshotResponse) XXX_Size() int { - return m.Size() + +func (*UpdateSnapshotResponse) ProtoMessage() {} + +func (x *UpdateSnapshotResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *UpdateSnapshotResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateSnapshotResponse.DiscardUnknown(m) + +// Deprecated: Use UpdateSnapshotResponse.ProtoReflect.Descriptor instead. 
+func (*UpdateSnapshotResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{12} } -var xxx_messageInfo_UpdateSnapshotResponse proto.InternalMessageInfo +func (x *UpdateSnapshotResponse) GetInfo() *Info { + if x != nil { + return x.Info + } + return nil +} type ListSnapshotsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` // Filters contains one or more filters using the syntax defined in the // containerd filter package. @@ -620,4922 +887,829 @@ type ListSnapshotsRequest struct { // filters. Expanded, images that match the following will be // returned: // - // filters[0] or filters[1] or ... or filters[n-1] or filters[n] + // filters[0] or filters[1] or ... or filters[n-1] or filters[n] // // If filters is zero-length or nil, all items will be returned. - Filters []string `protobuf:"bytes,2,rep,name=filters,proto3" json:"filters,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Filters []string `protobuf:"bytes,2,rep,name=filters,proto3" json:"filters,omitempty"` } -func (m *ListSnapshotsRequest) Reset() { *m = ListSnapshotsRequest{} } -func (*ListSnapshotsRequest) ProtoMessage() {} -func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{13} -} -func (m *ListSnapshotsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListSnapshotsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListSnapshotsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ListSnapshotsRequest) Reset() { + *x = ListSnapshotsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ListSnapshotsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListSnapshotsRequest.Merge(m, src) -} -func (m *ListSnapshotsRequest) XXX_Size() int { - return m.Size() -} -func (m *ListSnapshotsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListSnapshotsRequest.DiscardUnknown(m) + +func (x *ListSnapshotsRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ListSnapshotsRequest proto.InternalMessageInfo +func (*ListSnapshotsRequest) ProtoMessage() {} -type ListSnapshotsResponse struct { - Info []Info `protobuf:"bytes,1,rep,name=info,proto3" json:"info"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *ListSnapshotsRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (m *ListSnapshotsResponse) Reset() { *m = ListSnapshotsResponse{} } -func (*ListSnapshotsResponse) ProtoMessage() {} -func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) { - 
return fileDescriptor_cfc0ddf12791f168, []int{14} -} -func (m *ListSnapshotsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListSnapshotsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListSnapshotsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ListSnapshotsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListSnapshotsResponse.Merge(m, src) -} -func (m *ListSnapshotsResponse) XXX_Size() int { - return m.Size() -} -func (m *ListSnapshotsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListSnapshotsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ListSnapshotsResponse proto.InternalMessageInfo - -type UsageRequest struct { - Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` - Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UsageRequest) Reset() { *m = UsageRequest{} } -func (*UsageRequest) ProtoMessage() {} -func (*UsageRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{15} -} -func (m *UsageRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UsageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UsageRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UsageRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_UsageRequest.Merge(m, src) -} -func (m *UsageRequest) XXX_Size() int { - return m.Size() -} -func (m *UsageRequest) XXX_DiscardUnknown() { - xxx_messageInfo_UsageRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_UsageRequest proto.InternalMessageInfo - -type UsageResponse struct { - Size_ int64 `protobuf:"varint,1,opt,name=size,proto3" json:"size,omitempty"` - Inodes int64 `protobuf:"varint,2,opt,name=inodes,proto3" json:"inodes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UsageResponse) Reset() { *m = UsageResponse{} } -func (*UsageResponse) ProtoMessage() {} -func (*UsageResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{16} -} -func (m *UsageResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UsageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UsageResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UsageResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UsageResponse.Merge(m, src) -} -func (m *UsageResponse) XXX_Size() int { - return m.Size() -} -func (m *UsageResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UsageResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_UsageResponse proto.InternalMessageInfo - -type CleanupRequest struct { - Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - 
XXX_sizecache int32 `json:"-"` -} - -func (m *CleanupRequest) Reset() { *m = CleanupRequest{} } -func (*CleanupRequest) ProtoMessage() {} -func (*CleanupRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_cfc0ddf12791f168, []int{17} -} -func (m *CleanupRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CleanupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CleanupRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CleanupRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CleanupRequest.Merge(m, src) -} -func (m *CleanupRequest) XXX_Size() int { - return m.Size() -} -func (m *CleanupRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CleanupRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CleanupRequest proto.InternalMessageInfo - -func init() { - proto.RegisterEnum("containerd.services.snapshots.v1.Kind", Kind_name, Kind_value) - proto.RegisterType((*PrepareSnapshotRequest)(nil), "containerd.services.snapshots.v1.PrepareSnapshotRequest") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.snapshots.v1.PrepareSnapshotRequest.LabelsEntry") - proto.RegisterType((*PrepareSnapshotResponse)(nil), "containerd.services.snapshots.v1.PrepareSnapshotResponse") - proto.RegisterType((*ViewSnapshotRequest)(nil), "containerd.services.snapshots.v1.ViewSnapshotRequest") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.snapshots.v1.ViewSnapshotRequest.LabelsEntry") - proto.RegisterType((*ViewSnapshotResponse)(nil), "containerd.services.snapshots.v1.ViewSnapshotResponse") - proto.RegisterType((*MountsRequest)(nil), "containerd.services.snapshots.v1.MountsRequest") - proto.RegisterType((*MountsResponse)(nil), "containerd.services.snapshots.v1.MountsResponse") - proto.RegisterType((*RemoveSnapshotRequest)(nil), "containerd.services.snapshots.v1.RemoveSnapshotRequest") - proto.RegisterType((*CommitSnapshotRequest)(nil), "containerd.services.snapshots.v1.CommitSnapshotRequest") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.snapshots.v1.CommitSnapshotRequest.LabelsEntry") - proto.RegisterType((*StatSnapshotRequest)(nil), "containerd.services.snapshots.v1.StatSnapshotRequest") - proto.RegisterType((*Info)(nil), "containerd.services.snapshots.v1.Info") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.snapshots.v1.Info.LabelsEntry") - proto.RegisterType((*StatSnapshotResponse)(nil), "containerd.services.snapshots.v1.StatSnapshotResponse") - proto.RegisterType((*UpdateSnapshotRequest)(nil), "containerd.services.snapshots.v1.UpdateSnapshotRequest") - proto.RegisterType((*UpdateSnapshotResponse)(nil), "containerd.services.snapshots.v1.UpdateSnapshotResponse") - proto.RegisterType((*ListSnapshotsRequest)(nil), "containerd.services.snapshots.v1.ListSnapshotsRequest") - proto.RegisterType((*ListSnapshotsResponse)(nil), "containerd.services.snapshots.v1.ListSnapshotsResponse") - proto.RegisterType((*UsageRequest)(nil), "containerd.services.snapshots.v1.UsageRequest") - proto.RegisterType((*UsageResponse)(nil), "containerd.services.snapshots.v1.UsageResponse") - proto.RegisterType((*CleanupRequest)(nil), "containerd.services.snapshots.v1.CleanupRequest") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto", 
fileDescriptor_cfc0ddf12791f168) -} - -var fileDescriptor_cfc0ddf12791f168 = []byte{ - // 1047 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xf7, 0xda, 0x5b, 0x27, 0x79, 0x4e, 0x82, 0x99, 0x3a, 0xae, 0xb5, 0x20, 0x67, 0xe5, 0x03, - 0x8a, 0x38, 0xec, 0xb6, 0x46, 0xb4, 0x69, 0x73, 0xc1, 0x71, 0x0c, 0x72, 0xd2, 0xa4, 0x68, 0xf3, - 0xa7, 0x4d, 0x41, 0x54, 0x1b, 0x7b, 0xec, 0xac, 0xec, 0xfd, 0x83, 0x67, 0xec, 0xca, 0x20, 0x21, - 0x8e, 0x55, 0x4e, 0x7c, 0x81, 0x9c, 0xe0, 0x43, 0x20, 0x3e, 0x41, 0x8e, 0x48, 0x5c, 0x38, 0x01, - 0xcd, 0x97, 0xe0, 0x84, 0x40, 0x33, 0x3b, 0xeb, 0x7f, 0x71, 0xe5, 0xb5, 0x6b, 0x6e, 0x33, 0x3b, - 0xf3, 0xde, 0xfb, 0xbd, 0xdf, 0x9b, 0xf7, 0x9b, 0x59, 0xd8, 0xad, 0x5b, 0xf4, 0xbc, 0x7d, 0xa6, - 0x55, 0x5c, 0x5b, 0xaf, 0xb8, 0x0e, 0x35, 0x2d, 0x07, 0xb7, 0xaa, 0x83, 0x43, 0xd3, 0xb3, 0x74, - 0x82, 0x5b, 0x1d, 0xab, 0x82, 0x89, 0x4e, 0x1c, 0xd3, 0x23, 0xe7, 0x2e, 0x25, 0x7a, 0xe7, 0x5e, - 0x7f, 0xa2, 0x79, 0x2d, 0x97, 0xba, 0x48, 0xed, 0x5b, 0x69, 0x81, 0x85, 0xd6, 0xdf, 0xd4, 0xb9, - 0xa7, 0xa4, 0xea, 0x6e, 0xdd, 0xe5, 0x9b, 0x75, 0x36, 0xf2, 0xed, 0x94, 0xf7, 0xea, 0xae, 0x5b, - 0x6f, 0x62, 0x9d, 0xcf, 0xce, 0xda, 0x35, 0x1d, 0xdb, 0x1e, 0xed, 0x8a, 0x45, 0x75, 0x74, 0xb1, - 0x66, 0xe1, 0x66, 0xf5, 0x85, 0x6d, 0x92, 0x86, 0xd8, 0xb1, 0x3e, 0xba, 0x83, 0x5a, 0x36, 0x26, - 0xd4, 0xb4, 0x3d, 0xb1, 0xe1, 0x7e, 0xa8, 0x1c, 0x69, 0xd7, 0xc3, 0x44, 0xb7, 0xdd, 0xb6, 0x43, - 0x7d, 0xbb, 0xdc, 0x3f, 0x12, 0xa4, 0x3f, 0x6f, 0x61, 0xcf, 0x6c, 0xe1, 0x43, 0x91, 0x85, 0x81, - 0xbf, 0x6e, 0x63, 0x42, 0x91, 0x0a, 0x89, 0x20, 0x31, 0x8a, 0x5b, 0x19, 0x49, 0x95, 0x36, 0x96, - 0x8c, 0xc1, 0x4f, 0x28, 0x09, 0xb1, 0x06, 0xee, 0x66, 0xa2, 0x7c, 0x85, 0x0d, 0x51, 0x1a, 0xe2, - 0xcc, 0x95, 0x43, 0x33, 0x31, 0xfe, 0x51, 0xcc, 0xd0, 0x97, 0x10, 0x6f, 0x9a, 0x67, 0xb8, 0x49, - 0x32, 0xb2, 0x1a, 0xdb, 0x48, 0xe4, 0x77, 0xb4, 0x49, 0x3c, 0x6a, 0xe3, 0x51, 0x69, 0x8f, 0xb9, - 0x9b, 0x92, 0x43, 0x5b, 0x5d, 0x43, 0xf8, 0x54, 0x1e, 0x42, 0x62, 0xe0, 0x73, 0x00, 0x4b, 0xea, - 0xc3, 0x4a, 0xc1, 0xad, 0x8e, 0xd9, 0x6c, 0x63, 0x01, 0xd5, 0x9f, 0x3c, 0x8a, 0x6e, 0x4a, 0xb9, - 0x5d, 0xb8, 0x73, 0x23, 0x10, 0xf1, 0x5c, 0x87, 0x60, 0xa4, 0x43, 0x9c, 0x33, 0x45, 0x32, 0x12, - 0xc7, 0x7c, 0x67, 0x10, 0x33, 0x67, 0x52, 0xdb, 0x67, 0xeb, 0x86, 0xd8, 0x96, 0xfb, 0x5b, 0x82, - 0xdb, 0x27, 0x16, 0x7e, 0xf9, 0x7f, 0x12, 0x79, 0x3a, 0x42, 0x64, 0x61, 0x32, 0x91, 0x63, 0x20, - 0xcd, 0x9b, 0xc5, 0xcf, 0x20, 0x35, 0x1c, 0x65, 0x56, 0x0a, 0x8b, 0xb0, 0xc2, 0x3f, 0x90, 0xb7, - 0xe0, 0x2e, 0x57, 0x80, 0xd5, 0xc0, 0xc9, 0xac, 0x38, 0xf6, 0x60, 0xcd, 0xc0, 0xb6, 0xdb, 0x99, - 0x47, 0x53, 0xb0, 0x73, 0xb1, 0x56, 0x74, 0x6d, 0xdb, 0xa2, 0xd3, 0x7b, 0x43, 0x20, 0x3b, 0xa6, - 0x1d, 0x50, 0xce, 0xc7, 0x41, 0x84, 0x58, 0xbf, 0x32, 0x5f, 0x8c, 0x9c, 0x8a, 0xe2, 0xe4, 0x53, - 0x31, 0x16, 0xd0, 0xbc, 0xcf, 0x45, 0x19, 0x6e, 0x1f, 0x52, 0x93, 0xce, 0x83, 0xc4, 0x7f, 0xa3, - 0x20, 0x97, 0x9d, 0x9a, 0xdb, 0x63, 0x44, 0x1a, 0x60, 0xa4, 0xdf, 0x2d, 0xd1, 0xa1, 0x6e, 0x79, - 0x04, 0x72, 0xc3, 0x72, 0xaa, 0x9c, 0xaa, 0xd5, 0xfc, 0x07, 0x93, 0x59, 0xd9, 0xb3, 0x9c, 0xaa, - 0xc1, 0x6d, 0x50, 0x11, 0xa0, 0xd2, 0xc2, 0x26, 0xc5, 0xd5, 0x17, 0x26, 0xcd, 0xc8, 0xaa, 0xb4, - 0x91, 0xc8, 0x2b, 0x9a, 0xaf, 0xc3, 0x5a, 0xa0, 0xc3, 0xda, 0x51, 0xa0, 0xc3, 0xdb, 0x8b, 0x57, - 0x7f, 0xac, 0x47, 0x7e, 0xf8, 0x73, 0x5d, 0x32, 0x96, 0x84, 0x5d, 0x81, 0x32, 0x27, 0x6d, 0xaf, - 0x1a, 0x38, 0xb9, 0x35, 0x8d, 0x13, 0x61, 0x57, 0xa0, 0x68, 0xb7, 0x57, 0xdd, 0x38, 
0xaf, 0x6e, - 0x7e, 0x72, 0x1e, 0x8c, 0xa9, 0x79, 0x17, 0xf3, 0x19, 0xa4, 0x86, 0x8b, 0x29, 0x9a, 0xeb, 0x13, - 0x90, 0x2d, 0xa7, 0xe6, 0x72, 0x27, 0x89, 0x30, 0x24, 0x33, 0x70, 0xdb, 0x32, 0xcb, 0xd4, 0xe0, - 0x96, 0xb9, 0x9f, 0x25, 0x58, 0x3b, 0xe6, 0xe9, 0x4e, 0x7f, 0x52, 0x82, 0xe8, 0xd1, 0x59, 0xa3, - 0xa3, 0x2d, 0x48, 0xf8, 0x5c, 0xf3, 0x0b, 0x97, 0x9f, 0x95, 0x71, 0x45, 0xfa, 0x94, 0xdd, 0xc9, - 0xfb, 0x26, 0x69, 0x18, 0xa2, 0xa4, 0x6c, 0x9c, 0x7b, 0x0e, 0xe9, 0x51, 0xe4, 0x73, 0xa3, 0xc5, - 0x80, 0xd4, 0x63, 0x8b, 0xf4, 0x08, 0x9f, 0x42, 0x13, 0x33, 0xb0, 0x50, 0xb3, 0x9a, 0x14, 0xb7, - 0x48, 0x26, 0xaa, 0xc6, 0x36, 0x96, 0x8c, 0x60, 0x9a, 0x3b, 0x85, 0xb5, 0x11, 0x9f, 0x37, 0xe0, - 0xc6, 0x66, 0x84, 0xbb, 0x0d, 0xcb, 0xc7, 0xc4, 0xac, 0xe3, 0xb7, 0xe9, 0xf2, 0x2d, 0x58, 0x11, - 0x3e, 0x04, 0x2c, 0x04, 0x32, 0xb1, 0xbe, 0xf1, 0xbb, 0x3d, 0x66, 0xf0, 0x31, 0xeb, 0x76, 0xcb, - 0x71, 0xab, 0x98, 0x70, 0xcb, 0x98, 0x21, 0x66, 0xb9, 0x3c, 0xac, 0x16, 0x9b, 0xd8, 0x74, 0xda, - 0x5e, 0x68, 0x08, 0x1f, 0xbe, 0x92, 0x40, 0x66, 0x4d, 0x8f, 0xde, 0x87, 0x85, 0xe3, 0x83, 0xbd, - 0x83, 0x27, 0x4f, 0x0f, 0x92, 0x11, 0xe5, 0x9d, 0x8b, 0x4b, 0x35, 0xc1, 0x3e, 0x1f, 0x3b, 0x0d, - 0xc7, 0x7d, 0xe9, 0xa0, 0x34, 0xc8, 0x27, 0xe5, 0xd2, 0xd3, 0xa4, 0xa4, 0x2c, 0x5f, 0x5c, 0xaa, - 0x8b, 0x6c, 0x89, 0x5d, 0x78, 0x48, 0x81, 0x78, 0xa1, 0x78, 0x54, 0x3e, 0x29, 0x25, 0xa3, 0xca, - 0xea, 0xc5, 0xa5, 0x0a, 0x6c, 0xa5, 0x50, 0xa1, 0x56, 0x07, 0x23, 0x15, 0x96, 0x8a, 0x4f, 0xf6, - 0xf7, 0xcb, 0x47, 0x47, 0xa5, 0x9d, 0x64, 0x4c, 0x79, 0xf7, 0xe2, 0x52, 0x5d, 0x61, 0xcb, 0xbe, - 0xf2, 0x52, 0x5c, 0x55, 0x96, 0x5f, 0xfd, 0x98, 0x8d, 0xfc, 0xf2, 0x53, 0x96, 0x23, 0xc8, 0xff, - 0xb6, 0x08, 0x4b, 0xbd, 0xba, 0xa0, 0xef, 0x60, 0x41, 0x3c, 0x4c, 0xd0, 0xe6, 0xac, 0x8f, 0x25, - 0xe5, 0xe1, 0x0c, 0x96, 0x82, 0xf8, 0x36, 0xc8, 0x3c, 0xc3, 0x8f, 0x67, 0x7a, 0x60, 0x28, 0xf7, - 0xa7, 0x35, 0x13, 0x61, 0x1b, 0x10, 0xf7, 0xef, 0x6e, 0xa4, 0x4f, 0xf6, 0x30, 0xf4, 0x54, 0x50, - 0xee, 0x86, 0x37, 0x10, 0xc1, 0x4e, 0x21, 0xee, 0x17, 0x03, 0x3d, 0x98, 0xf1, 0xc2, 0x54, 0xd2, - 0x37, 0x74, 0xa2, 0xc4, 0x1e, 0xf6, 0xcc, 0xb5, 0xff, 0x80, 0x08, 0xe3, 0x7a, 0xec, 0x53, 0xe3, - 0x8d, 0xae, 0xdb, 0x20, 0x33, 0x1d, 0x0e, 0x53, 0x99, 0x31, 0x97, 0x6f, 0x98, 0xca, 0x8c, 0x95, - 0xf9, 0x6f, 0x21, 0xee, 0x2b, 0x5d, 0x98, 0x8c, 0xc6, 0xaa, 0xb9, 0xb2, 0x39, 0xbd, 0xa1, 0x08, - 0xde, 0x05, 0x99, 0xc9, 0x16, 0x0a, 0x01, 0x7e, 0x9c, 0x64, 0x2a, 0x0f, 0xa6, 0xb6, 0xf3, 0x03, - 0xdf, 0x95, 0xd0, 0x39, 0xdc, 0xe2, 0x92, 0x84, 0xb4, 0x10, 0xe8, 0x07, 0xf4, 0x4f, 0xd1, 0x43, - 0xef, 0x17, 0x49, 0x1e, 0xc2, 0x82, 0xd0, 0x2f, 0x14, 0xe2, 0x2c, 0x0f, 0x4b, 0xdd, 0x9b, 0x4e, - 0xcb, 0xf6, 0x57, 0x57, 0xaf, 0xb3, 0x91, 0xdf, 0x5f, 0x67, 0x23, 0xdf, 0x5f, 0x67, 0xa5, 0xab, - 0xeb, 0xac, 0xf4, 0xeb, 0x75, 0x56, 0xfa, 0xeb, 0x3a, 0x2b, 0x3d, 0xdf, 0x99, 0xfd, 0xb7, 0x78, - 0xab, 0x37, 0x79, 0x16, 0x39, 0x8b, 0xf3, 0x88, 0x1f, 0xfd, 0x17, 0x00, 0x00, 0xff, 0xff, 0x20, - 0x9c, 0x85, 0x7f, 0x67, 0x0f, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// SnapshotsClient is the client API for Snapshots service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type SnapshotsClient interface { - Prepare(ctx context.Context, in *PrepareSnapshotRequest, opts ...grpc.CallOption) (*PrepareSnapshotResponse, error) - View(ctx context.Context, in *ViewSnapshotRequest, opts ...grpc.CallOption) (*ViewSnapshotResponse, error) - Mounts(ctx context.Context, in *MountsRequest, opts ...grpc.CallOption) (*MountsResponse, error) - Commit(ctx context.Context, in *CommitSnapshotRequest, opts ...grpc.CallOption) (*types1.Empty, error) - Remove(ctx context.Context, in *RemoveSnapshotRequest, opts ...grpc.CallOption) (*types1.Empty, error) - Stat(ctx context.Context, in *StatSnapshotRequest, opts ...grpc.CallOption) (*StatSnapshotResponse, error) - Update(ctx context.Context, in *UpdateSnapshotRequest, opts ...grpc.CallOption) (*UpdateSnapshotResponse, error) - List(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (Snapshots_ListClient, error) - Usage(ctx context.Context, in *UsageRequest, opts ...grpc.CallOption) (*UsageResponse, error) - Cleanup(ctx context.Context, in *CleanupRequest, opts ...grpc.CallOption) (*types1.Empty, error) -} - -type snapshotsClient struct { - cc *grpc.ClientConn -} - -func NewSnapshotsClient(cc *grpc.ClientConn) SnapshotsClient { - return &snapshotsClient{cc} -} - -func (c *snapshotsClient) Prepare(ctx context.Context, in *PrepareSnapshotRequest, opts ...grpc.CallOption) (*PrepareSnapshotResponse, error) { - out := new(PrepareSnapshotResponse) - err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Prepare", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *snapshotsClient) View(ctx context.Context, in *ViewSnapshotRequest, opts ...grpc.CallOption) (*ViewSnapshotResponse, error) { - out := new(ViewSnapshotResponse) - err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/View", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *snapshotsClient) Mounts(ctx context.Context, in *MountsRequest, opts ...grpc.CallOption) (*MountsResponse, error) { - out := new(MountsResponse) - err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Mounts", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *snapshotsClient) Commit(ctx context.Context, in *CommitSnapshotRequest, opts ...grpc.CallOption) (*types1.Empty, error) { - out := new(types1.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Commit", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *snapshotsClient) Remove(ctx context.Context, in *RemoveSnapshotRequest, opts ...grpc.CallOption) (*types1.Empty, error) { - out := new(types1.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Remove", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *snapshotsClient) Stat(ctx context.Context, in *StatSnapshotRequest, opts ...grpc.CallOption) (*StatSnapshotResponse, error) { - out := new(StatSnapshotResponse) - err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Stat", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *snapshotsClient) Update(ctx context.Context, in *UpdateSnapshotRequest, opts ...grpc.CallOption) (*UpdateSnapshotResponse, error) { - out := new(UpdateSnapshotResponse) - err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Update", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *snapshotsClient) List(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (Snapshots_ListClient, error) { - stream, err := c.cc.NewStream(ctx, &_Snapshots_serviceDesc.Streams[0], "/containerd.services.snapshots.v1.Snapshots/List", opts...) - if err != nil { - return nil, err - } - x := &snapshotsListClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Snapshots_ListClient interface { - Recv() (*ListSnapshotsResponse, error) - grpc.ClientStream -} - -type snapshotsListClient struct { - grpc.ClientStream -} - -func (x *snapshotsListClient) Recv() (*ListSnapshotsResponse, error) { - m := new(ListSnapshotsResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *snapshotsClient) Usage(ctx context.Context, in *UsageRequest, opts ...grpc.CallOption) (*UsageResponse, error) { - out := new(UsageResponse) - err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Usage", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *snapshotsClient) Cleanup(ctx context.Context, in *CleanupRequest, opts ...grpc.CallOption) (*types1.Empty, error) { - out := new(types1.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Cleanup", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// SnapshotsServer is the server API for Snapshots service. -type SnapshotsServer interface { - Prepare(context.Context, *PrepareSnapshotRequest) (*PrepareSnapshotResponse, error) - View(context.Context, *ViewSnapshotRequest) (*ViewSnapshotResponse, error) - Mounts(context.Context, *MountsRequest) (*MountsResponse, error) - Commit(context.Context, *CommitSnapshotRequest) (*types1.Empty, error) - Remove(context.Context, *RemoveSnapshotRequest) (*types1.Empty, error) - Stat(context.Context, *StatSnapshotRequest) (*StatSnapshotResponse, error) - Update(context.Context, *UpdateSnapshotRequest) (*UpdateSnapshotResponse, error) - List(*ListSnapshotsRequest, Snapshots_ListServer) error - Usage(context.Context, *UsageRequest) (*UsageResponse, error) - Cleanup(context.Context, *CleanupRequest) (*types1.Empty, error) -} - -// UnimplementedSnapshotsServer can be embedded to have forward compatible implementations. 
-type UnimplementedSnapshotsServer struct { -} - -func (*UnimplementedSnapshotsServer) Prepare(ctx context.Context, req *PrepareSnapshotRequest) (*PrepareSnapshotResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Prepare not implemented") -} -func (*UnimplementedSnapshotsServer) View(ctx context.Context, req *ViewSnapshotRequest) (*ViewSnapshotResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method View not implemented") -} -func (*UnimplementedSnapshotsServer) Mounts(ctx context.Context, req *MountsRequest) (*MountsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Mounts not implemented") -} -func (*UnimplementedSnapshotsServer) Commit(ctx context.Context, req *CommitSnapshotRequest) (*types1.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Commit not implemented") -} -func (*UnimplementedSnapshotsServer) Remove(ctx context.Context, req *RemoveSnapshotRequest) (*types1.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Remove not implemented") -} -func (*UnimplementedSnapshotsServer) Stat(ctx context.Context, req *StatSnapshotRequest) (*StatSnapshotResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Stat not implemented") -} -func (*UnimplementedSnapshotsServer) Update(ctx context.Context, req *UpdateSnapshotRequest) (*UpdateSnapshotResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") -} -func (*UnimplementedSnapshotsServer) List(req *ListSnapshotsRequest, srv Snapshots_ListServer) error { - return status.Errorf(codes.Unimplemented, "method List not implemented") -} -func (*UnimplementedSnapshotsServer) Usage(ctx context.Context, req *UsageRequest) (*UsageResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Usage not implemented") -} -func (*UnimplementedSnapshotsServer) Cleanup(ctx context.Context, req *CleanupRequest) (*types1.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Cleanup not implemented") -} - -func RegisterSnapshotsServer(s *grpc.Server, srv SnapshotsServer) { - s.RegisterService(&_Snapshots_serviceDesc, srv) +// Deprecated: Use ListSnapshotsRequest.ProtoReflect.Descriptor instead. 
+func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{13} } -func _Snapshots_Prepare_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PrepareSnapshotRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SnapshotsServer).Prepare(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.snapshots.v1.Snapshots/Prepare", +func (x *ListSnapshotsRequest) GetSnapshotter() string { + if x != nil { + return x.Snapshotter } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SnapshotsServer).Prepare(ctx, req.(*PrepareSnapshotRequest)) - } - return interceptor(ctx, in, info, handler) + return "" } -func _Snapshots_View_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ViewSnapshotRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SnapshotsServer).View(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.snapshots.v1.Snapshots/View", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SnapshotsServer).View(ctx, req.(*ViewSnapshotRequest)) +func (x *ListSnapshotsRequest) GetFilters() []string { + if x != nil { + return x.Filters } - return interceptor(ctx, in, info, handler) + return nil } -func _Snapshots_Mounts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MountsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SnapshotsServer).Mounts(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.snapshots.v1.Snapshots/Mounts", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SnapshotsServer).Mounts(ctx, req.(*MountsRequest)) - } - return interceptor(ctx, in, info, handler) -} +type ListSnapshotsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func _Snapshots_Commit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CommitSnapshotRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SnapshotsServer).Commit(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.snapshots.v1.Snapshots/Commit", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SnapshotsServer).Commit(ctx, req.(*CommitSnapshotRequest)) - } - return interceptor(ctx, in, info, handler) + Info []*Info `protobuf:"bytes,1,rep,name=info,proto3" json:"info,omitempty"` } -func _Snapshots_Remove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RemoveSnapshotRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SnapshotsServer).Remove(ctx, in) +func (x 
*ListSnapshotsResponse) Reset() { + *x = ListSnapshotsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.snapshots.v1.Snapshots/Remove", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SnapshotsServer).Remove(ctx, req.(*RemoveSnapshotRequest)) - } - return interceptor(ctx, in, info, handler) } -func _Snapshots_Stat_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StatSnapshotRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SnapshotsServer).Stat(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.snapshots.v1.Snapshots/Stat", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SnapshotsServer).Stat(ctx, req.(*StatSnapshotRequest)) - } - return interceptor(ctx, in, info, handler) +func (x *ListSnapshotsResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func _Snapshots_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateSnapshotRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SnapshotsServer).Update(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.snapshots.v1.Snapshots/Update", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SnapshotsServer).Update(ctx, req.(*UpdateSnapshotRequest)) - } - return interceptor(ctx, in, info, handler) -} +func (*ListSnapshotsResponse) ProtoMessage() {} -func _Snapshots_List_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(ListSnapshotsRequest) - if err := stream.RecvMsg(m); err != nil { - return err +func (x *ListSnapshotsResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return srv.(SnapshotsServer).List(m, &snapshotsListServer{stream}) -} - -type Snapshots_ListServer interface { - Send(*ListSnapshotsResponse) error - grpc.ServerStream -} - -type snapshotsListServer struct { - grpc.ServerStream -} - -func (x *snapshotsListServer) Send(m *ListSnapshotsResponse) error { - return x.ServerStream.SendMsg(m) + return mi.MessageOf(x) } -func _Snapshots_Usage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UsageRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SnapshotsServer).Usage(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.snapshots.v1.Snapshots/Usage", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SnapshotsServer).Usage(ctx, req.(*UsageRequest)) - } - return interceptor(ctx, in, info, 
handler) +// Deprecated: Use ListSnapshotsResponse.ProtoReflect.Descriptor instead. +func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{14} } -func _Snapshots_Cleanup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CleanupRequest) - if err := dec(in); err != nil { - return nil, err +func (x *ListSnapshotsResponse) GetInfo() []*Info { + if x != nil { + return x.Info } - if interceptor == nil { - return srv.(SnapshotsServer).Cleanup(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.snapshots.v1.Snapshots/Cleanup", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SnapshotsServer).Cleanup(ctx, req.(*CleanupRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Snapshots_serviceDesc = grpc.ServiceDesc{ - ServiceName: "containerd.services.snapshots.v1.Snapshots", - HandlerType: (*SnapshotsServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Prepare", - Handler: _Snapshots_Prepare_Handler, - }, - { - MethodName: "View", - Handler: _Snapshots_View_Handler, - }, - { - MethodName: "Mounts", - Handler: _Snapshots_Mounts_Handler, - }, - { - MethodName: "Commit", - Handler: _Snapshots_Commit_Handler, - }, - { - MethodName: "Remove", - Handler: _Snapshots_Remove_Handler, - }, - { - MethodName: "Stat", - Handler: _Snapshots_Stat_Handler, - }, - { - MethodName: "Update", - Handler: _Snapshots_Update_Handler, - }, - { - MethodName: "Usage", - Handler: _Snapshots_Usage_Handler, - }, - { - MethodName: "Cleanup", - Handler: _Snapshots_Cleanup_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "List", - Handler: _Snapshots_List_Handler, - ServerStreams: true, - }, - }, - Metadata: "github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto", + return nil } -func (m *PrepareSnapshotRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} +type UsageRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *PrepareSnapshotRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` + Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` } -func (m *PrepareSnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintSnapshots(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintSnapshots(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintSnapshots(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x22 - } - } - if len(m.Parent) > 0 { - i -= len(m.Parent) - copy(dAtA[i:], m.Parent) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Parent))) - i-- - dAtA[i] = 0x1a +func (x *UsageRequest) Reset() { + *x = UsageRequest{} + if 
protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x12 - } - if len(m.Snapshotter) > 0 { - i -= len(m.Snapshotter) - copy(dAtA[i:], m.Snapshotter) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil } -func (m *PrepareSnapshotResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (x *UsageRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *PrepareSnapshotResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} +func (*UsageRequest) ProtoMessage() {} -func (m *PrepareSnapshotResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Mounts) > 0 { - for iNdEx := len(m.Mounts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Mounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSnapshots(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa +func (x *UsageRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } - return len(dAtA) - i, nil -} - -func (m *ViewSnapshotRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ViewSnapshotRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return mi.MessageOf(x) } -func (m *ViewSnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintSnapshots(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintSnapshots(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintSnapshots(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x22 - } - } - if len(m.Parent) > 0 { - i -= len(m.Parent) - copy(dAtA[i:], m.Parent) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Parent))) - i-- - dAtA[i] = 0x1a - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x12 - } - if len(m.Snapshotter) > 0 { - i -= len(m.Snapshotter) - copy(dAtA[i:], m.Snapshotter) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil +// Deprecated: Use UsageRequest.ProtoReflect.Descriptor instead. 
+func (*UsageRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{15} } -func (m *ViewSnapshotResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *UsageRequest) GetSnapshotter() string { + if x != nil { + return x.Snapshotter } - return dAtA[:n], nil -} - -func (m *ViewSnapshotResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return "" } -func (m *ViewSnapshotResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Mounts) > 0 { - for iNdEx := len(m.Mounts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Mounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSnapshots(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } +func (x *UsageRequest) GetKey() string { + if x != nil { + return x.Key } - return len(dAtA) - i, nil + return "" } -func (m *MountsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} +type UsageResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *MountsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + Size int64 `protobuf:"varint,1,opt,name=size,proto3" json:"size,omitempty"` + Inodes int64 `protobuf:"varint,2,opt,name=inodes,proto3" json:"inodes,omitempty"` } -func (m *MountsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x12 +func (x *UsageResponse) Reset() { + *x = UsageResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - if len(m.Snapshotter) > 0 { - i -= len(m.Snapshotter) - copy(dAtA[i:], m.Snapshotter) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil } -func (m *MountsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (x *UsageResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *MountsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} +func (*UsageResponse) ProtoMessage() {} -func (m *MountsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Mounts) > 0 { - for iNdEx := len(m.Mounts) - 1; iNdEx >= 0; iNdEx-- { - { - 
size, err := m.Mounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSnapshots(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa +func (x *UsageResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } - return len(dAtA) - i, nil -} - -func (m *RemoveSnapshotRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RemoveSnapshotRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return mi.MessageOf(x) } -func (m *RemoveSnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x12 - } - if len(m.Snapshotter) > 0 { - i -= len(m.Snapshotter) - copy(dAtA[i:], m.Snapshotter) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil +// Deprecated: Use UsageResponse.ProtoReflect.Descriptor instead. +func (*UsageResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{16} } -func (m *CommitSnapshotRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *UsageResponse) GetSize() int64 { + if x != nil { + return x.Size } - return dAtA[:n], nil -} - -func (m *CommitSnapshotRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return 0 } -func (m *CommitSnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintSnapshots(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintSnapshots(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintSnapshots(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x22 - } - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x1a +func (x *UsageResponse) GetInodes() int64 { + if x != nil { + return x.Inodes } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - } - if len(m.Snapshotter) > 0 { - i -= len(m.Snapshotter) - copy(dAtA[i:], m.Snapshotter) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return 0 } -func (m *StatSnapshotRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = 
make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} +type CleanupRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *StatSnapshotRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` } -func (m *StatSnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x12 - } - if len(m.Snapshotter) > 0 { - i -= len(m.Snapshotter) - copy(dAtA[i:], m.Snapshotter) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter))) - i-- - dAtA[i] = 0xa +func (x *CleanupRequest) Reset() { + *x = CleanupRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return len(dAtA) - i, nil } -func (m *Info) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (x *CleanupRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Info) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} +func (*CleanupRequest) ProtoMessage() {} -func (m *Info) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintSnapshots(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintSnapshots(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintSnapshots(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x32 +func (x *CleanupRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } - n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt):]) - if err1 != nil { - return 0, err1 - } - i -= n1 - i = encodeVarintSnapshots(dAtA, i, uint64(n1)) - i-- - dAtA[i] = 0x2a - n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt):]) - if err2 != nil { - return 0, err2 - } - i -= n2 - i = encodeVarintSnapshots(dAtA, i, uint64(n2)) - i-- - dAtA[i] = 0x22 - if m.Kind != 0 { - i = encodeVarintSnapshots(dAtA, i, uint64(m.Kind)) - i-- - dAtA[i] = 0x18 - } - if len(m.Parent) > 0 { - i -= len(m.Parent) - copy(dAtA[i:], m.Parent) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Parent))) - i-- - dAtA[i] = 0x12 - } - if 
len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *StatSnapshotResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *StatSnapshotResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatSnapshotResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSnapshots(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil +// Deprecated: Use CleanupRequest.ProtoReflect.Descriptor instead. +func (*CleanupRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP(), []int{17} +} + +func (x *CleanupRequest) GetSnapshotter() string { + if x != nil { + return x.Snapshotter + } + return "" +} + +var File_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDesc = []byte{ + 0x0a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x20, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x1a, 0x1b, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x36, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6d, 0x6f, 0x75, 0x6e, 0x74, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xfd, 0x01, 0x0a, 0x16, 0x50, 0x72, 0x65, 0x70, 0x61, + 0x72, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x74, 
0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x5c, 0x0a, + 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x17, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, + 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x06, 0x6d, 0x6f, 0x75, 0x6e, + 0x74, 0x73, 0x22, 0xf7, 0x01, 0x0a, 0x13, 0x56, 0x69, 0x65, 0x77, 0x53, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x16, + 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x59, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x69, 0x65, 0x77, 0x53, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x47, 0x0a, 0x14, + 0x56, 0x69, 0x65, 0x77, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x74, 0x79, 0x70, 
0x65, 0x73, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x06, 0x6d, + 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x22, 0x43, 0x0a, 0x0d, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x41, 0x0a, 0x0e, 0x4d, 0x6f, + 0x75, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, + 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, + 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x06, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x22, 0x4b, 0x0a, + 0x15, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0xf7, 0x01, 0x0a, 0x15, 0x43, + 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x5b, 0x0a, 0x06, + 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x74, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x73, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, + 0xeb, 0x02, 0x0a, 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 
0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x12, 0x3a, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, + 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x4a, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x52, 0x0a, + 0x14, 0x53, 0x74, 0x61, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, + 0x6f, 0x22, 0xb2, 0x01, 0x0a, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x73, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x12, 0x3a, 0x0a, + 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, + 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x62, 0x75, 0x66, + 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x54, 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x3a, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x52, 0x0a, 0x14, + 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x22, 0x53, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x04, 0x69, 0x6e, 0x66, + 0x6f, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, + 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x22, 0x42, 0x0a, 0x0c, 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x3b, 0x0a, 0x0d, 0x55, 0x73, 0x61, + 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, + 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x22, 0x32, 0x0a, 0x0e, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, + 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x2a, 0x38, 0x0a, 0x04, 0x4b, 0x69, + 0x6e, 0x64, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, + 0x08, 0x0a, 0x04, 0x56, 0x49, 0x45, 0x57, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, + 0x49, 0x56, 0x45, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x54, + 0x45, 0x44, 0x10, 0x03, 0x32, 0xd3, 0x08, 0x0a, 0x09, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x73, 0x12, 0x7e, 0x0a, 0x07, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x12, 0x38, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 
0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x65, 0x70, 0x61, + 0x72, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x75, 0x0a, 0x04, 0x56, 0x69, 0x65, 0x77, 0x12, 0x35, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x69, + 0x65, 0x77, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x36, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x69, 0x65, 0x77, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6b, 0x0a, 0x06, 0x4d, 0x6f, 0x75, + 0x6e, 0x74, 0x73, 0x12, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x59, 0x0a, 0x06, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, + 0x12, 0x37, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x12, 0x59, 0x0a, 0x06, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x37, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x75, 0x0a, 0x04, + 0x53, 0x74, 0x61, 0x74, 0x12, 0x35, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2e, 0x73, 0x6e, 0x61, 
0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, + 0x74, 0x61, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x37, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x79, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x36, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x37, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x68, 0x0a, 0x05, 0x55, + 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x07, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, + 0x12, 0x30, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x46, 0x5a, 0x44, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x73, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x76, 0x31, 0x3b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func (m *UpdateSnapshotRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - 
return nil, err - } - return dAtA[:n], nil -} +var ( + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescData = file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDesc +) -func (m *UpdateSnapshotRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes = make([]protoimpl.MessageInfo, 22) +var file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_goTypes = []interface{}{ + (Kind)(0), // 0: containerd.services.snapshots.v1.Kind + (*PrepareSnapshotRequest)(nil), // 1: containerd.services.snapshots.v1.PrepareSnapshotRequest + (*PrepareSnapshotResponse)(nil), // 2: containerd.services.snapshots.v1.PrepareSnapshotResponse + (*ViewSnapshotRequest)(nil), // 3: containerd.services.snapshots.v1.ViewSnapshotRequest + (*ViewSnapshotResponse)(nil), // 4: containerd.services.snapshots.v1.ViewSnapshotResponse + (*MountsRequest)(nil), // 5: containerd.services.snapshots.v1.MountsRequest + (*MountsResponse)(nil), // 6: containerd.services.snapshots.v1.MountsResponse + (*RemoveSnapshotRequest)(nil), // 7: containerd.services.snapshots.v1.RemoveSnapshotRequest + (*CommitSnapshotRequest)(nil), // 8: containerd.services.snapshots.v1.CommitSnapshotRequest + (*StatSnapshotRequest)(nil), // 9: containerd.services.snapshots.v1.StatSnapshotRequest + (*Info)(nil), // 10: containerd.services.snapshots.v1.Info + (*StatSnapshotResponse)(nil), // 11: containerd.services.snapshots.v1.StatSnapshotResponse + (*UpdateSnapshotRequest)(nil), // 12: containerd.services.snapshots.v1.UpdateSnapshotRequest + (*UpdateSnapshotResponse)(nil), // 13: containerd.services.snapshots.v1.UpdateSnapshotResponse + (*ListSnapshotsRequest)(nil), // 14: containerd.services.snapshots.v1.ListSnapshotsRequest + (*ListSnapshotsResponse)(nil), // 15: containerd.services.snapshots.v1.ListSnapshotsResponse + (*UsageRequest)(nil), // 16: containerd.services.snapshots.v1.UsageRequest + (*UsageResponse)(nil), // 17: containerd.services.snapshots.v1.UsageResponse + (*CleanupRequest)(nil), // 18: containerd.services.snapshots.v1.CleanupRequest + nil, // 19: containerd.services.snapshots.v1.PrepareSnapshotRequest.LabelsEntry + nil, // 20: containerd.services.snapshots.v1.ViewSnapshotRequest.LabelsEntry + nil, // 21: containerd.services.snapshots.v1.CommitSnapshotRequest.LabelsEntry + nil, // 22: containerd.services.snapshots.v1.Info.LabelsEntry + (*types.Mount)(nil), // 23: containerd.types.Mount + (*timestamppb.Timestamp)(nil), // 24: google.protobuf.Timestamp + (*fieldmaskpb.FieldMask)(nil), // 25: google.protobuf.FieldMask + (*emptypb.Empty)(nil), // 26: 
google.protobuf.Empty +} +var file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_depIdxs = []int32{ + 19, // 0: containerd.services.snapshots.v1.PrepareSnapshotRequest.labels:type_name -> containerd.services.snapshots.v1.PrepareSnapshotRequest.LabelsEntry + 23, // 1: containerd.services.snapshots.v1.PrepareSnapshotResponse.mounts:type_name -> containerd.types.Mount + 20, // 2: containerd.services.snapshots.v1.ViewSnapshotRequest.labels:type_name -> containerd.services.snapshots.v1.ViewSnapshotRequest.LabelsEntry + 23, // 3: containerd.services.snapshots.v1.ViewSnapshotResponse.mounts:type_name -> containerd.types.Mount + 23, // 4: containerd.services.snapshots.v1.MountsResponse.mounts:type_name -> containerd.types.Mount + 21, // 5: containerd.services.snapshots.v1.CommitSnapshotRequest.labels:type_name -> containerd.services.snapshots.v1.CommitSnapshotRequest.LabelsEntry + 0, // 6: containerd.services.snapshots.v1.Info.kind:type_name -> containerd.services.snapshots.v1.Kind + 24, // 7: containerd.services.snapshots.v1.Info.created_at:type_name -> google.protobuf.Timestamp + 24, // 8: containerd.services.snapshots.v1.Info.updated_at:type_name -> google.protobuf.Timestamp + 22, // 9: containerd.services.snapshots.v1.Info.labels:type_name -> containerd.services.snapshots.v1.Info.LabelsEntry + 10, // 10: containerd.services.snapshots.v1.StatSnapshotResponse.info:type_name -> containerd.services.snapshots.v1.Info + 10, // 11: containerd.services.snapshots.v1.UpdateSnapshotRequest.info:type_name -> containerd.services.snapshots.v1.Info + 25, // 12: containerd.services.snapshots.v1.UpdateSnapshotRequest.update_mask:type_name -> google.protobuf.FieldMask + 10, // 13: containerd.services.snapshots.v1.UpdateSnapshotResponse.info:type_name -> containerd.services.snapshots.v1.Info + 10, // 14: containerd.services.snapshots.v1.ListSnapshotsResponse.info:type_name -> containerd.services.snapshots.v1.Info + 1, // 15: containerd.services.snapshots.v1.Snapshots.Prepare:input_type -> containerd.services.snapshots.v1.PrepareSnapshotRequest + 3, // 16: containerd.services.snapshots.v1.Snapshots.View:input_type -> containerd.services.snapshots.v1.ViewSnapshotRequest + 5, // 17: containerd.services.snapshots.v1.Snapshots.Mounts:input_type -> containerd.services.snapshots.v1.MountsRequest + 8, // 18: containerd.services.snapshots.v1.Snapshots.Commit:input_type -> containerd.services.snapshots.v1.CommitSnapshotRequest + 7, // 19: containerd.services.snapshots.v1.Snapshots.Remove:input_type -> containerd.services.snapshots.v1.RemoveSnapshotRequest + 9, // 20: containerd.services.snapshots.v1.Snapshots.Stat:input_type -> containerd.services.snapshots.v1.StatSnapshotRequest + 12, // 21: containerd.services.snapshots.v1.Snapshots.Update:input_type -> containerd.services.snapshots.v1.UpdateSnapshotRequest + 14, // 22: containerd.services.snapshots.v1.Snapshots.List:input_type -> containerd.services.snapshots.v1.ListSnapshotsRequest + 16, // 23: containerd.services.snapshots.v1.Snapshots.Usage:input_type -> containerd.services.snapshots.v1.UsageRequest + 18, // 24: containerd.services.snapshots.v1.Snapshots.Cleanup:input_type -> containerd.services.snapshots.v1.CleanupRequest + 2, // 25: containerd.services.snapshots.v1.Snapshots.Prepare:output_type -> containerd.services.snapshots.v1.PrepareSnapshotResponse + 4, // 26: containerd.services.snapshots.v1.Snapshots.View:output_type -> containerd.services.snapshots.v1.ViewSnapshotResponse + 6, // 27: 
containerd.services.snapshots.v1.Snapshots.Mounts:output_type -> containerd.services.snapshots.v1.MountsResponse + 26, // 28: containerd.services.snapshots.v1.Snapshots.Commit:output_type -> google.protobuf.Empty + 26, // 29: containerd.services.snapshots.v1.Snapshots.Remove:output_type -> google.protobuf.Empty + 11, // 30: containerd.services.snapshots.v1.Snapshots.Stat:output_type -> containerd.services.snapshots.v1.StatSnapshotResponse + 13, // 31: containerd.services.snapshots.v1.Snapshots.Update:output_type -> containerd.services.snapshots.v1.UpdateSnapshotResponse + 15, // 32: containerd.services.snapshots.v1.Snapshots.List:output_type -> containerd.services.snapshots.v1.ListSnapshotsResponse + 17, // 33: containerd.services.snapshots.v1.Snapshots.Usage:output_type -> containerd.services.snapshots.v1.UsageResponse + 26, // 34: containerd.services.snapshots.v1.Snapshots.Cleanup:output_type -> google.protobuf.Empty + 25, // [25:35] is the sub-list for method output_type + 15, // [15:25] is the sub-list for method input_type + 15, // [15:15] is the sub-list for extension type_name + 15, // [15:15] is the sub-list for extension extendee + 0, // [0:15] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_init() } +func file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_init() { + if File_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PrepareSnapshotRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PrepareSnapshotResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ViewSnapshotRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ViewSnapshotResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MountsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MountsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RemoveSnapshotRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CommitSnapshotRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatSnapshotRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Info); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StatSnapshotResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateSnapshotRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateSnapshotResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListSnapshotsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListSnapshotsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UsageRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UsageResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CleanupRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDesc, + NumEnums: 1, + NumMessages: 22, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_depIdxs, + EnumInfos: file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_enumTypes, + MessageInfos: file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto = out.File + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_rawDesc = nil + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_goTypes = nil + file_github_com_containerd_containerd_api_services_snapshots_v1_snapshots_proto_depIdxs = nil } - -func (m *UpdateSnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.UpdateMask != nil { - { - size, err := m.UpdateMask.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSnapshots(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSnapshots(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Snapshotter) > 0 { - i -= len(m.Snapshotter) - copy(dAtA[i:], m.Snapshotter) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *UpdateSnapshotResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UpdateSnapshotResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UpdateSnapshotResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSnapshots(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ListSnapshotsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListSnapshotsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListSnapshotsRequest) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Filters) > 0 { - for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Filters[iNdEx]) - copy(dAtA[i:], m.Filters[iNdEx]) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Filters[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Snapshotter) > 0 { - i -= len(m.Snapshotter) - copy(dAtA[i:], m.Snapshotter) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ListSnapshotsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListSnapshotsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListSnapshotsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Info) > 0 { - for iNdEx := len(m.Info) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Info[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintSnapshots(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *UsageRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UsageRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UsageRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0x12 - } - if len(m.Snapshotter) > 0 { - i -= len(m.Snapshotter) - copy(dAtA[i:], m.Snapshotter) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *UsageResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UsageResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UsageResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Inodes != 0 { - i = encodeVarintSnapshots(dAtA, i, uint64(m.Inodes)) - i-- - dAtA[i] = 0x10 - } - if m.Size_ != 0 { - i = encodeVarintSnapshots(dAtA, i, uint64(m.Size_)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *CleanupRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CleanupRequest) 
MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CleanupRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Snapshotter) > 0 { - i -= len(m.Snapshotter) - copy(dAtA[i:], m.Snapshotter) - i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintSnapshots(dAtA []byte, offset int, v uint64) int { - offset -= sovSnapshots(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *PrepareSnapshotRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Snapshotter) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - l = len(m.Parent) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovSnapshots(uint64(len(k))) + 1 + len(v) + sovSnapshots(uint64(len(v))) - n += mapEntrySize + 1 + sovSnapshots(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PrepareSnapshotResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Mounts) > 0 { - for _, e := range m.Mounts { - l = e.Size() - n += 1 + l + sovSnapshots(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ViewSnapshotRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Snapshotter) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - l = len(m.Parent) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovSnapshots(uint64(len(k))) + 1 + len(v) + sovSnapshots(uint64(len(v))) - n += mapEntrySize + 1 + sovSnapshots(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ViewSnapshotResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Mounts) > 0 { - for _, e := range m.Mounts { - l = e.Size() - n += 1 + l + sovSnapshots(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MountsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Snapshotter) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MountsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Mounts) > 0 { - for _, e := range m.Mounts { - l = e.Size() - n += 1 + l + sovSnapshots(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *RemoveSnapshotRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Snapshotter) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - l = len(m.Key) - if l > 0 { - n += 
1 + l + sovSnapshots(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CommitSnapshotRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Snapshotter) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovSnapshots(uint64(len(k))) + 1 + len(v) + sovSnapshots(uint64(len(v))) - n += mapEntrySize + 1 + sovSnapshots(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StatSnapshotRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Snapshotter) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Info) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - l = len(m.Parent) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - if m.Kind != 0 { - n += 1 + sovSnapshots(uint64(m.Kind)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt) - n += 1 + l + sovSnapshots(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt) - n += 1 + l + sovSnapshots(uint64(l)) - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovSnapshots(uint64(len(k))) + 1 + len(v) + sovSnapshots(uint64(len(v))) - n += mapEntrySize + 1 + sovSnapshots(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StatSnapshotResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Info.Size() - n += 1 + l + sovSnapshots(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UpdateSnapshotRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Snapshotter) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - l = m.Info.Size() - n += 1 + l + sovSnapshots(uint64(l)) - if m.UpdateMask != nil { - l = m.UpdateMask.Size() - n += 1 + l + sovSnapshots(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UpdateSnapshotResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Info.Size() - n += 1 + l + sovSnapshots(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListSnapshotsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Snapshotter) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - if len(m.Filters) > 0 { - for _, s := range m.Filters { - l = len(s) - n += 1 + l + sovSnapshots(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListSnapshotsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Info) > 0 { - for _, e := range m.Info { - l = e.Size() - n += 1 + l + sovSnapshots(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m 
*UsageRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Snapshotter) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - l = len(m.Key) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UsageResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Size_ != 0 { - n += 1 + sovSnapshots(uint64(m.Size_)) - } - if m.Inodes != 0 { - n += 1 + sovSnapshots(uint64(m.Inodes)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CleanupRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Snapshotter) - if l > 0 { - n += 1 + l + sovSnapshots(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovSnapshots(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozSnapshots(x uint64) (n int) { - return sovSnapshots(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *PrepareSnapshotRequest) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&PrepareSnapshotRequest{`, - `Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Parent:` + fmt.Sprintf("%v", this.Parent) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *PrepareSnapshotResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForMounts := "[]*Mount{" - for _, f := range this.Mounts { - repeatedStringForMounts += strings.Replace(fmt.Sprintf("%v", f), "Mount", "types.Mount", 1) + "," - } - repeatedStringForMounts += "}" - s := strings.Join([]string{`&PrepareSnapshotResponse{`, - `Mounts:` + repeatedStringForMounts + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ViewSnapshotRequest) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&ViewSnapshotRequest{`, - `Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Parent:` + fmt.Sprintf("%v", this.Parent) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ViewSnapshotResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForMounts := "[]*Mount{" - for _, f := range this.Mounts { - repeatedStringForMounts += strings.Replace(fmt.Sprintf("%v", f), "Mount", "types.Mount", 1) + "," - } - repeatedStringForMounts += "}" - s := 
strings.Join([]string{`&ViewSnapshotResponse{`, - `Mounts:` + repeatedStringForMounts + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *MountsRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MountsRequest{`, - `Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *MountsResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForMounts := "[]*Mount{" - for _, f := range this.Mounts { - repeatedStringForMounts += strings.Replace(fmt.Sprintf("%v", f), "Mount", "types.Mount", 1) + "," - } - repeatedStringForMounts += "}" - s := strings.Join([]string{`&MountsResponse{`, - `Mounts:` + repeatedStringForMounts + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *RemoveSnapshotRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RemoveSnapshotRequest{`, - `Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CommitSnapshotRequest) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&CommitSnapshotRequest{`, - `Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *StatSnapshotRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatSnapshotRequest{`, - `Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *Info) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&Info{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Parent:` + fmt.Sprintf("%v", this.Parent) + `,`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `CreatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CreatedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`, - `UpdatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UpdatedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + 
fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *StatSnapshotResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatSnapshotResponse{`, - `Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UpdateSnapshotRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UpdateSnapshotRequest{`, - `Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`, - `Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`, - `UpdateMask:` + strings.Replace(fmt.Sprintf("%v", this.UpdateMask), "FieldMask", "types1.FieldMask", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UpdateSnapshotResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UpdateSnapshotResponse{`, - `Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListSnapshotsRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ListSnapshotsRequest{`, - `Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`, - `Filters:` + fmt.Sprintf("%v", this.Filters) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListSnapshotsResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForInfo := "[]Info{" - for _, f := range this.Info { - repeatedStringForInfo += strings.Replace(strings.Replace(f.String(), "Info", "Info", 1), `&`, ``, 1) + "," - } - repeatedStringForInfo += "}" - s := strings.Join([]string{`&ListSnapshotsResponse{`, - `Info:` + repeatedStringForInfo + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UsageRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UsageRequest{`, - `Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UsageResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UsageResponse{`, - `Size_:` + fmt.Sprintf("%v", this.Size_) + `,`, - `Inodes:` + fmt.Sprintf("%v", this.Inodes) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CleanupRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CleanupRequest{`, - `Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringSnapshots(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *PrepareSnapshotRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 
{ - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PrepareSnapshotRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PrepareSnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Snapshotter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Parent = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break 
- } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthSnapshots - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthSnapshots - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthSnapshots - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthSnapshots - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PrepareSnapshotResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PrepareSnapshotResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PrepareSnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Mounts = append(m.Mounts, &types.Mount{}) - if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ViewSnapshotRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ViewSnapshotRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ViewSnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Snapshotter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Parent = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for 
iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthSnapshots - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthSnapshots - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthSnapshots - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthSnapshots - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ViewSnapshotResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ViewSnapshotResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ViewSnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Mounts = append(m.Mounts, &types.Mount{}) - if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MountsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MountsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MountsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Snapshotter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MountsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MountsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MountsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Mounts = append(m.Mounts, &types.Mount{}) - if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RemoveSnapshotRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RemoveSnapshotRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveSnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Snapshotter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CommitSnapshotRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CommitSnapshotRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CommitSnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Snapshotter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for 
iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthSnapshots - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthSnapshots - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthSnapshots - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthSnapshots - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatSnapshotRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatSnapshotRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatSnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Snapshotter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Info) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Info: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Info: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Parent = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - m.Kind = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Kind |= Kind(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - 
if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthSnapshots - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthSnapshots - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthSnapshots - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthSnapshots - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatSnapshotResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatSnapshotResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatSnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UpdateSnapshotRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UpdateSnapshotRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateSnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Snapshotter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdateMask", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.UpdateMask == nil { - m.UpdateMask = &types1.FieldMask{} - } - if err := m.UpdateMask.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UpdateSnapshotResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UpdateSnapshotResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateSnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListSnapshotsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListSnapshotsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListSnapshotsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Snapshotter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListSnapshotsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListSnapshotsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListSnapshotsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Info = append(m.Info, Info{}) - if err := m.Info[len(m.Info)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UsageRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UsageRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UsageRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Snapshotter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UsageResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UsageResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UsageResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) - } - m.Size_ = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Size_ |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Inodes", wireType) - } - m.Inodes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Inodes |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CleanupRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CleanupRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CleanupRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSnapshots - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSnapshots - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Snapshotter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnapshots(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSnapshots - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func skipSnapshots(dAtA []byte) (n int, err error) {
-	l := len(dAtA)
-	iNdEx := 0
-	depth := 0
-	for iNdEx < l {
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return 0, ErrIntOverflowSnapshots
-			}
-			if iNdEx >= l {
-				return 0, io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		wireType := int(wire & 0x7)
-		switch wireType {
-		case 0:
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowSnapshots
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				iNdEx++
-				if dAtA[iNdEx-1] < 0x80 {
-					break
-				}
-			}
-		case 1:
-			iNdEx += 8
-		case 2:
-			var length int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowSnapshots
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				length |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if length < 0 {
-				return 0, ErrInvalidLengthSnapshots
-			}
-			iNdEx += length
-		case 3:
-			depth++
-		case 4:
-			if depth == 0 {
-				return 0, ErrUnexpectedEndOfGroupSnapshots
-			}
-			depth--
-		case 5:
-			iNdEx += 4
-		default:
-			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
-		}
-		if iNdEx < 0 {
-			return 0, ErrInvalidLengthSnapshots
-		}
-		if depth == 0 {
-			return iNdEx, nil
-		}
-	}
-	return 0, io.ErrUnexpectedEOF
-}
-
-var (
-	ErrInvalidLengthSnapshots        = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowSnapshots          = fmt.Errorf("proto: integer overflow")
-	ErrUnexpectedEndOfGroupSnapshots = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto b/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto
index dfb8ff1e70a9b..170ff473e2a3f 100644
--- a/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto
+++ b/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto
@@ -18,7 +18,6 @@ syntax = "proto3";
 
 package containerd.services.snapshots.v1;
 
-import weak "gogoproto/gogo.proto";
 import "google/protobuf/empty.proto";
 import "google/protobuf/field_mask.proto";
 import "google/protobuf/timestamp.proto";
@@ -101,13 +100,10 @@ message StatSnapshotRequest {
 }
 
 enum Kind {
-	option (gogoproto.goproto_enum_prefix) = false;
-	option (gogoproto.enum_customname) = "Kind";
-
-	UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "KindUnknown"];
-	VIEW = 1 [(gogoproto.enumvalue_customname) = "KindView"];
-	ACTIVE = 2 [(gogoproto.enumvalue_customname) = "KindActive"];
-	COMMITTED = 3 [(gogoproto.enumvalue_customname) = "KindCommitted"];
+	UNKNOWN = 0;
+	VIEW = 1;
+	ACTIVE = 2;
+	COMMITTED = 3;
 }
 
 message Info {
@@ -116,10 +112,10 @@ message Info {
 	Kind kind = 3;
 
 	// CreatedAt provides the time at which the snapshot was created.
-	google.protobuf.Timestamp created_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+	google.protobuf.Timestamp created_at = 4;
 
 	// UpdatedAt provides the time the info was last updated.
-	google.protobuf.Timestamp updated_at = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+	google.protobuf.Timestamp updated_at = 5;
 
 	// Labels are arbitrary data on snapshots.
 	//
@@ -128,12 +124,12 @@ message Info {
 }
 
 message StatSnapshotResponse {
-	Info info = 1 [(gogoproto.nullable) = false];
+	Info info = 1;
 }
 
 message UpdateSnapshotRequest {
 	string snapshotter = 1;
-	Info info = 2 [(gogoproto.nullable) = false];
+	Info info = 2;
 
 	// UpdateMask specifies which fields to perform the update on. If empty,
 	// the operation applies to all fields.
@@ -145,7 +141,7 @@ message UpdateSnapshotRequest {
 }
 
 message UpdateSnapshotResponse {
-	Info info = 1 [(gogoproto.nullable) = false];
+	Info info = 1;
 }
 
 message ListSnapshotsRequest{
@@ -158,14 +154,14 @@ message ListSnapshotsRequest{
 	// filters. Expanded, images that match the following will be
 	// returned:
 	//
-	//   filters[0] or filters[1] or ... or filters[n-1] or filters[n]
+	//   filters[0] or filters[1] or ... or filters[n-1] or filters[n]
 	//
 	// If filters is zero-length or nil, all items will be returned.
 	repeated string filters = 2;
 }
 
 message ListSnapshotsResponse {
-	repeated Info info = 1 [(gogoproto.nullable) = false];
+	repeated Info info = 1;
 }
 
 message UsageRequest {
diff --git a/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots_grpc.pb.go
new file mode 100644
index 0000000000000..765c0274465d5
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/snapshots/v1/snapshots_grpc.pb.go
@@ -0,0 +1,458 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.2.0
+// - protoc v3.20.1
+// source: github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto
+
+package snapshots
+
+import (
+	context "context"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+	emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+// SnapshotsClient is the client API for Snapshots service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type SnapshotsClient interface { + Prepare(ctx context.Context, in *PrepareSnapshotRequest, opts ...grpc.CallOption) (*PrepareSnapshotResponse, error) + View(ctx context.Context, in *ViewSnapshotRequest, opts ...grpc.CallOption) (*ViewSnapshotResponse, error) + Mounts(ctx context.Context, in *MountsRequest, opts ...grpc.CallOption) (*MountsResponse, error) + Commit(ctx context.Context, in *CommitSnapshotRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + Remove(ctx context.Context, in *RemoveSnapshotRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + Stat(ctx context.Context, in *StatSnapshotRequest, opts ...grpc.CallOption) (*StatSnapshotResponse, error) + Update(ctx context.Context, in *UpdateSnapshotRequest, opts ...grpc.CallOption) (*UpdateSnapshotResponse, error) + List(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (Snapshots_ListClient, error) + Usage(ctx context.Context, in *UsageRequest, opts ...grpc.CallOption) (*UsageResponse, error) + Cleanup(ctx context.Context, in *CleanupRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) +} + +type snapshotsClient struct { + cc grpc.ClientConnInterface +} + +func NewSnapshotsClient(cc grpc.ClientConnInterface) SnapshotsClient { + return &snapshotsClient{cc} +} + +func (c *snapshotsClient) Prepare(ctx context.Context, in *PrepareSnapshotRequest, opts ...grpc.CallOption) (*PrepareSnapshotResponse, error) { + out := new(PrepareSnapshotResponse) + err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Prepare", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *snapshotsClient) View(ctx context.Context, in *ViewSnapshotRequest, opts ...grpc.CallOption) (*ViewSnapshotResponse, error) { + out := new(ViewSnapshotResponse) + err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/View", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *snapshotsClient) Mounts(ctx context.Context, in *MountsRequest, opts ...grpc.CallOption) (*MountsResponse, error) { + out := new(MountsResponse) + err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Mounts", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *snapshotsClient) Commit(ctx context.Context, in *CommitSnapshotRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Commit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *snapshotsClient) Remove(ctx context.Context, in *RemoveSnapshotRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Remove", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *snapshotsClient) Stat(ctx context.Context, in *StatSnapshotRequest, opts ...grpc.CallOption) (*StatSnapshotResponse, error) { + out := new(StatSnapshotResponse) + err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Stat", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *snapshotsClient) Update(ctx context.Context, in *UpdateSnapshotRequest, opts ...grpc.CallOption) (*UpdateSnapshotResponse, error) { + out := new(UpdateSnapshotResponse) + err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Update", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *snapshotsClient) List(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (Snapshots_ListClient, error) { + stream, err := c.cc.NewStream(ctx, &Snapshots_ServiceDesc.Streams[0], "/containerd.services.snapshots.v1.Snapshots/List", opts...) + if err != nil { + return nil, err + } + x := &snapshotsListClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Snapshots_ListClient interface { + Recv() (*ListSnapshotsResponse, error) + grpc.ClientStream +} + +type snapshotsListClient struct { + grpc.ClientStream +} + +func (x *snapshotsListClient) Recv() (*ListSnapshotsResponse, error) { + m := new(ListSnapshotsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *snapshotsClient) Usage(ctx context.Context, in *UsageRequest, opts ...grpc.CallOption) (*UsageResponse, error) { + out := new(UsageResponse) + err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Usage", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *snapshotsClient) Cleanup(ctx context.Context, in *CleanupRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Cleanup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SnapshotsServer is the server API for Snapshots service. +// All implementations must embed UnimplementedSnapshotsServer +// for forward compatibility +type SnapshotsServer interface { + Prepare(context.Context, *PrepareSnapshotRequest) (*PrepareSnapshotResponse, error) + View(context.Context, *ViewSnapshotRequest) (*ViewSnapshotResponse, error) + Mounts(context.Context, *MountsRequest) (*MountsResponse, error) + Commit(context.Context, *CommitSnapshotRequest) (*emptypb.Empty, error) + Remove(context.Context, *RemoveSnapshotRequest) (*emptypb.Empty, error) + Stat(context.Context, *StatSnapshotRequest) (*StatSnapshotResponse, error) + Update(context.Context, *UpdateSnapshotRequest) (*UpdateSnapshotResponse, error) + List(*ListSnapshotsRequest, Snapshots_ListServer) error + Usage(context.Context, *UsageRequest) (*UsageResponse, error) + Cleanup(context.Context, *CleanupRequest) (*emptypb.Empty, error) + mustEmbedUnimplementedSnapshotsServer() +} + +// UnimplementedSnapshotsServer must be embedded to have forward compatible implementations. 
+type UnimplementedSnapshotsServer struct { +} + +func (UnimplementedSnapshotsServer) Prepare(context.Context, *PrepareSnapshotRequest) (*PrepareSnapshotResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Prepare not implemented") +} +func (UnimplementedSnapshotsServer) View(context.Context, *ViewSnapshotRequest) (*ViewSnapshotResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method View not implemented") +} +func (UnimplementedSnapshotsServer) Mounts(context.Context, *MountsRequest) (*MountsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Mounts not implemented") +} +func (UnimplementedSnapshotsServer) Commit(context.Context, *CommitSnapshotRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Commit not implemented") +} +func (UnimplementedSnapshotsServer) Remove(context.Context, *RemoveSnapshotRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Remove not implemented") +} +func (UnimplementedSnapshotsServer) Stat(context.Context, *StatSnapshotRequest) (*StatSnapshotResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Stat not implemented") +} +func (UnimplementedSnapshotsServer) Update(context.Context, *UpdateSnapshotRequest) (*UpdateSnapshotResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") +} +func (UnimplementedSnapshotsServer) List(*ListSnapshotsRequest, Snapshots_ListServer) error { + return status.Errorf(codes.Unimplemented, "method List not implemented") +} +func (UnimplementedSnapshotsServer) Usage(context.Context, *UsageRequest) (*UsageResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Usage not implemented") +} +func (UnimplementedSnapshotsServer) Cleanup(context.Context, *CleanupRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Cleanup not implemented") +} +func (UnimplementedSnapshotsServer) mustEmbedUnimplementedSnapshotsServer() {} + +// UnsafeSnapshotsServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to SnapshotsServer will +// result in compilation errors. 
+type UnsafeSnapshotsServer interface { + mustEmbedUnimplementedSnapshotsServer() +} + +func RegisterSnapshotsServer(s grpc.ServiceRegistrar, srv SnapshotsServer) { + s.RegisterService(&Snapshots_ServiceDesc, srv) +} + +func _Snapshots_Prepare_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PrepareSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnapshotsServer).Prepare(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.snapshots.v1.Snapshots/Prepare", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnapshotsServer).Prepare(ctx, req.(*PrepareSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Snapshots_View_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ViewSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnapshotsServer).View(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.snapshots.v1.Snapshots/View", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnapshotsServer).View(ctx, req.(*ViewSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Snapshots_Mounts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MountsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnapshotsServer).Mounts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.snapshots.v1.Snapshots/Mounts", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnapshotsServer).Mounts(ctx, req.(*MountsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Snapshots_Commit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CommitSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnapshotsServer).Commit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.snapshots.v1.Snapshots/Commit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnapshotsServer).Commit(ctx, req.(*CommitSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Snapshots_Remove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnapshotsServer).Remove(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.snapshots.v1.Snapshots/Remove", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnapshotsServer).Remove(ctx, req.(*RemoveSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Snapshots_Stat_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StatSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnapshotsServer).Stat(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.snapshots.v1.Snapshots/Stat", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnapshotsServer).Stat(ctx, req.(*StatSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Snapshots_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateSnapshotRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnapshotsServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.snapshots.v1.Snapshots/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnapshotsServer).Update(ctx, req.(*UpdateSnapshotRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Snapshots_List_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ListSnapshotsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(SnapshotsServer).List(m, &snapshotsListServer{stream}) +} + +type Snapshots_ListServer interface { + Send(*ListSnapshotsResponse) error + grpc.ServerStream +} + +type snapshotsListServer struct { + grpc.ServerStream +} + +func (x *snapshotsListServer) Send(m *ListSnapshotsResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Snapshots_Usage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UsageRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnapshotsServer).Usage(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.snapshots.v1.Snapshots/Usage", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnapshotsServer).Usage(ctx, req.(*UsageRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Snapshots_Cleanup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CleanupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SnapshotsServer).Cleanup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.snapshots.v1.Snapshots/Cleanup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SnapshotsServer).Cleanup(ctx, req.(*CleanupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Snapshots_ServiceDesc is the grpc.ServiceDesc for Snapshots service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Snapshots_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.snapshots.v1.Snapshots", + HandlerType: (*SnapshotsServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Prepare", + Handler: _Snapshots_Prepare_Handler, + }, + { + MethodName: "View", + Handler: _Snapshots_View_Handler, + }, + { + MethodName: "Mounts", + Handler: _Snapshots_Mounts_Handler, + }, + { + MethodName: "Commit", + Handler: _Snapshots_Commit_Handler, + }, + { + MethodName: "Remove", + Handler: _Snapshots_Remove_Handler, + }, + { + MethodName: "Stat", + Handler: _Snapshots_Stat_Handler, + }, + { + MethodName: "Update", + Handler: _Snapshots_Update_Handler, + }, + { + MethodName: "Usage", + Handler: _Snapshots_Usage_Handler, + }, + { + MethodName: "Cleanup", + Handler: _Snapshots_Cleanup_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "List", + Handler: _Snapshots_List_Handler, + ServerStreams: true, + }, + }, + Metadata: "github.com/containerd/containerd/api/services/snapshots/v1/snapshots.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/services/streaming/v1/doc.go b/vendor/github.com/containerd/containerd/api/services/streaming/v1/doc.go new file mode 100644 index 0000000000000..04c4362d83e91 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/streaming/v1/doc.go @@ -0,0 +1,17 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package streaming diff --git a/vendor/github.com/containerd/containerd/api/services/streaming/v1/streaming.pb.go b/vendor/github.com/containerd/containerd/api/services/streaming/v1/streaming.pb.go new file mode 100644 index 0000000000000..08fb6392ecb9e --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/streaming/v1/streaming.pb.go @@ -0,0 +1,175 @@ +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/streaming/v1/streaming.proto + +package streaming + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type StreamInit struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *StreamInit) Reset() { + *x = StreamInit{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StreamInit) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamInit) ProtoMessage() {} + +func (x *StreamInit) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamInit.ProtoReflect.Descriptor instead. +func (*StreamInit) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDescGZIP(), []int{0} +} + +func (x *StreamInit) GetID() string { + if x != nil { + return x.ID + } + return "" +} + +var File_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDesc = []byte{ + 0x0a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, 0x2f, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x20, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x31, 0x1a, 0x19, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x1c, 0x0a, 0x0a, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x49, 0x6e, 0x69, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x32, 0x45, 0x0a, 0x09, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x38, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x14, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x41, 0x6e, 0x79, 0x1a, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x28, 0x01, 0x30, 0x01, 0x42, 0x46, + 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x31, 0x3b, 0x73, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDescData = file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDesc +) + +func file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_goTypes = []interface{}{ + (*StreamInit)(nil), // 0: containerd.services.streaming.v1.StreamInit + (*anypb.Any)(nil), // 1: google.protobuf.Any +} +var file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_depIdxs = []int32{ + 1, // 0: containerd.services.streaming.v1.Streaming.Stream:input_type -> google.protobuf.Any + 1, // 1: containerd.services.streaming.v1.Streaming.Stream:output_type -> google.protobuf.Any + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_init() } +func file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_init() { + if File_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StreamInit); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_depIdxs, + MessageInfos: 
file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto = out.File + file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_rawDesc = nil + file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_goTypes = nil + file_github_com_containerd_containerd_api_services_streaming_v1_streaming_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/services/streaming/v1/streaming.proto b/vendor/github.com/containerd/containerd/api/services/streaming/v1/streaming.proto new file mode 100644 index 0000000000000..4c14f2ecfa095 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/streaming/v1/streaming.proto @@ -0,0 +1,31 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +syntax = "proto3"; + +package containerd.services.streaming.v1; + +import "google/protobuf/any.proto"; + +option go_package = "github.com/containerd/containerd/api/services/streaming/v1;streaming"; + +service Streaming { + rpc Stream(stream google.protobuf.Any) returns (stream google.protobuf.Any); +} + +message StreamInit { + string id = 1; +} diff --git a/vendor/github.com/containerd/containerd/api/services/streaming/v1/streaming_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/streaming/v1/streaming_grpc.pb.go new file mode 100644 index 0000000000000..a0a0bc59c8c1a --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/streaming/v1/streaming_grpc.pb.go @@ -0,0 +1,138 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/streaming/v1/streaming.proto + +package streaming + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + anypb "google.golang.org/protobuf/types/known/anypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// StreamingClient is the client API for Streaming service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type StreamingClient interface { + Stream(ctx context.Context, opts ...grpc.CallOption) (Streaming_StreamClient, error) +} + +type streamingClient struct { + cc grpc.ClientConnInterface +} + +func NewStreamingClient(cc grpc.ClientConnInterface) StreamingClient { + return &streamingClient{cc} +} + +func (c *streamingClient) Stream(ctx context.Context, opts ...grpc.CallOption) (Streaming_StreamClient, error) { + stream, err := c.cc.NewStream(ctx, &Streaming_ServiceDesc.Streams[0], "/containerd.services.streaming.v1.Streaming/Stream", opts...) + if err != nil { + return nil, err + } + x := &streamingStreamClient{stream} + return x, nil +} + +type Streaming_StreamClient interface { + Send(*anypb.Any) error + Recv() (*anypb.Any, error) + grpc.ClientStream +} + +type streamingStreamClient struct { + grpc.ClientStream +} + +func (x *streamingStreamClient) Send(m *anypb.Any) error { + return x.ClientStream.SendMsg(m) +} + +func (x *streamingStreamClient) Recv() (*anypb.Any, error) { + m := new(anypb.Any) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// StreamingServer is the server API for Streaming service. +// All implementations must embed UnimplementedStreamingServer +// for forward compatibility +type StreamingServer interface { + Stream(Streaming_StreamServer) error + mustEmbedUnimplementedStreamingServer() +} + +// UnimplementedStreamingServer must be embedded to have forward compatible implementations. +type UnimplementedStreamingServer struct { +} + +func (UnimplementedStreamingServer) Stream(Streaming_StreamServer) error { + return status.Errorf(codes.Unimplemented, "method Stream not implemented") +} +func (UnimplementedStreamingServer) mustEmbedUnimplementedStreamingServer() {} + +// UnsafeStreamingServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to StreamingServer will +// result in compilation errors. +type UnsafeStreamingServer interface { + mustEmbedUnimplementedStreamingServer() +} + +func RegisterStreamingServer(s grpc.ServiceRegistrar, srv StreamingServer) { + s.RegisterService(&Streaming_ServiceDesc, srv) +} + +func _Streaming_Stream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(StreamingServer).Stream(&streamingStreamServer{stream}) +} + +type Streaming_StreamServer interface { + Send(*anypb.Any) error + Recv() (*anypb.Any, error) + grpc.ServerStream +} + +type streamingStreamServer struct { + grpc.ServerStream +} + +func (x *streamingStreamServer) Send(m *anypb.Any) error { + return x.ServerStream.SendMsg(m) +} + +func (x *streamingStreamServer) Recv() (*anypb.Any, error) { + m := new(anypb.Any) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Streaming_ServiceDesc is the grpc.ServiceDesc for Streaming service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Streaming_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.streaming.v1.Streaming", + HandlerType: (*StreamingServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Stream", + Handler: _Streaming_Stream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "github.com/containerd/containerd/api/services/streaming/v1/streaming.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go index dcc76808938e9..1a55d696dd593 100644 --- a/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go @@ -1,42 +1,50 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/services/tasks/v1/tasks.proto package tasks import ( - context "context" - fmt "fmt" types "github.com/containerd/containerd/api/types" task "github.com/containerd/containerd/api/types/task" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types1 "github.com/gogo/protobuf/types" - github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + emptypb "google.golang.org/protobuf/types/known/emptypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - strings "strings" - time "time" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type CreateTaskRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` // RootFS provides the pre-chroot mounts to perform in the shim before // executing the container task. @@ -44,491 +52,713 @@ type CreateTaskRequest struct { // These are for mounts that cannot be performed in the user namespace. // Typically, these mounts should be resolved from snapshots specified on // the container object. - Rootfs []*types.Mount `protobuf:"bytes,3,rep,name=rootfs,proto3" json:"rootfs,omitempty"` - Stdin string `protobuf:"bytes,4,opt,name=stdin,proto3" json:"stdin,omitempty"` - Stdout string `protobuf:"bytes,5,opt,name=stdout,proto3" json:"stdout,omitempty"` - Stderr string `protobuf:"bytes,6,opt,name=stderr,proto3" json:"stderr,omitempty"` - Terminal bool `protobuf:"varint,7,opt,name=terminal,proto3" json:"terminal,omitempty"` - Checkpoint *types.Descriptor `protobuf:"bytes,8,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"` - Options *types1.Any `protobuf:"bytes,9,opt,name=options,proto3" json:"options,omitempty"` - RuntimePath string `protobuf:"bytes,10,opt,name=runtime_path,json=runtimePath,proto3" json:"runtime_path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CreateTaskRequest) Reset() { *m = CreateTaskRequest{} } + Rootfs []*types.Mount `protobuf:"bytes,3,rep,name=rootfs,proto3" json:"rootfs,omitempty"` + Stdin string `protobuf:"bytes,4,opt,name=stdin,proto3" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,5,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,6,opt,name=stderr,proto3" json:"stderr,omitempty"` + Terminal bool `protobuf:"varint,7,opt,name=terminal,proto3" json:"terminal,omitempty"` + Checkpoint *types.Descriptor `protobuf:"bytes,8,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"` + Options *anypb.Any `protobuf:"bytes,9,opt,name=options,proto3" json:"options,omitempty"` + RuntimePath string `protobuf:"bytes,10,opt,name=runtime_path,json=runtimePath,proto3" json:"runtime_path,omitempty"` +} + +func (x *CreateTaskRequest) Reset() { + *x = CreateTaskRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateTaskRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + func (*CreateTaskRequest) ProtoMessage() {} -func (*CreateTaskRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{0} -} -func (m *CreateTaskRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CreateTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CreateTaskRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *CreateTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } 
- return b[:n], nil + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateTaskRequest.ProtoReflect.Descriptor instead. +func (*CreateTaskRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{0} +} + +func (x *CreateTaskRequest) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" +} + +func (x *CreateTaskRequest) GetRootfs() []*types.Mount { + if x != nil { + return x.Rootfs + } + return nil +} + +func (x *CreateTaskRequest) GetStdin() string { + if x != nil { + return x.Stdin + } + return "" +} + +func (x *CreateTaskRequest) GetStdout() string { + if x != nil { + return x.Stdout } + return "" } -func (m *CreateTaskRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateTaskRequest.Merge(m, src) + +func (x *CreateTaskRequest) GetStderr() string { + if x != nil { + return x.Stderr + } + return "" +} + +func (x *CreateTaskRequest) GetTerminal() bool { + if x != nil { + return x.Terminal + } + return false } -func (m *CreateTaskRequest) XXX_Size() int { - return m.Size() + +func (x *CreateTaskRequest) GetCheckpoint() *types.Descriptor { + if x != nil { + return x.Checkpoint + } + return nil } -func (m *CreateTaskRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CreateTaskRequest.DiscardUnknown(m) + +func (x *CreateTaskRequest) GetOptions() *anypb.Any { + if x != nil { + return x.Options + } + return nil } -var xxx_messageInfo_CreateTaskRequest proto.InternalMessageInfo +func (x *CreateTaskRequest) GetRuntimePath() string { + if x != nil { + return x.RuntimePath + } + return "" +} type CreateTaskResponse struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` +} + +func (x *CreateTaskResponse) Reset() { + *x = CreateTaskResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateTaskResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *CreateTaskResponse) Reset() { *m = CreateTaskResponse{} } func (*CreateTaskResponse) ProtoMessage() {} -func (*CreateTaskResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{1} -} -func (m *CreateTaskResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CreateTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CreateTaskResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *CreateTaskResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == 
nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *CreateTaskResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateTaskResponse.Merge(m, src) -} -func (m *CreateTaskResponse) XXX_Size() int { - return m.Size() + +// Deprecated: Use CreateTaskResponse.ProtoReflect.Descriptor instead. +func (*CreateTaskResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{1} } -func (m *CreateTaskResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CreateTaskResponse.DiscardUnknown(m) + +func (x *CreateTaskResponse) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" } -var xxx_messageInfo_CreateTaskResponse proto.InternalMessageInfo +func (x *CreateTaskResponse) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 +} type StartRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` +} + +func (x *StartRequest) Reset() { + *x = StartRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StartRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *StartRequest) Reset() { *m = StartRequest{} } func (*StartRequest) ProtoMessage() {} -func (*StartRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{2} -} -func (m *StartRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StartRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StartRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *StartRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *StartRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StartRequest.Merge(m, src) -} -func (m *StartRequest) XXX_Size() int { - return m.Size() + +// Deprecated: Use StartRequest.ProtoReflect.Descriptor instead. 
+func (*StartRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{2} } -func (m *StartRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StartRequest.DiscardUnknown(m) + +func (x *StartRequest) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" } -var xxx_messageInfo_StartRequest proto.InternalMessageInfo +func (x *StartRequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} type StartResponse struct { - Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` +} + +func (x *StartResponse) Reset() { + *x = StartResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StartResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *StartResponse) Reset() { *m = StartResponse{} } func (*StartResponse) ProtoMessage() {} -func (*StartResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{3} -} -func (m *StartResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StartResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StartResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *StartResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *StartResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StartResponse.Merge(m, src) + +// Deprecated: Use StartResponse.ProtoReflect.Descriptor instead. 
+func (*StartResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{3} } -func (m *StartResponse) XXX_Size() int { - return m.Size() + +func (x *StartResponse) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 } -func (m *StartResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StartResponse.DiscardUnknown(m) + +type DeleteTaskRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` } -var xxx_messageInfo_StartResponse proto.InternalMessageInfo +func (x *DeleteTaskRequest) Reset() { + *x = DeleteTaskRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type DeleteTaskRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *DeleteTaskRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DeleteTaskRequest) Reset() { *m = DeleteTaskRequest{} } func (*DeleteTaskRequest) ProtoMessage() {} -func (*DeleteTaskRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{4} -} -func (m *DeleteTaskRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteTaskRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *DeleteTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *DeleteTaskRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteTaskRequest.Merge(m, src) + +// Deprecated: Use DeleteTaskRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteTaskRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{4} } -func (m *DeleteTaskRequest) XXX_Size() int { - return m.Size() + +func (x *DeleteTaskRequest) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" } -func (m *DeleteTaskRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteTaskRequest.DiscardUnknown(m) + +type DeleteResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` + ExitStatus uint32 `protobuf:"varint,3,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=exited_at,json=exitedAt,proto3" json:"exited_at,omitempty"` } -var xxx_messageInfo_DeleteTaskRequest proto.InternalMessageInfo +func (x *DeleteResponse) Reset() { + *x = DeleteResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type DeleteResponse struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` - ExitStatus uint32 `protobuf:"varint,3,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` - ExitedAt time.Time `protobuf:"bytes,4,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *DeleteResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DeleteResponse) Reset() { *m = DeleteResponse{} } func (*DeleteResponse) ProtoMessage() {} -func (*DeleteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{5} -} -func (m *DeleteResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *DeleteResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteResponse.ProtoReflect.Descriptor instead. 
+func (*DeleteResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{5} } -func (m *DeleteResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteResponse.Merge(m, src) + +func (x *DeleteResponse) GetID() string { + if x != nil { + return x.ID + } + return "" } -func (m *DeleteResponse) XXX_Size() int { - return m.Size() + +func (x *DeleteResponse) GetPid() uint32 { + if x != nil { + return x.Pid + } + return 0 } -func (m *DeleteResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteResponse.DiscardUnknown(m) + +func (x *DeleteResponse) GetExitStatus() uint32 { + if x != nil { + return x.ExitStatus + } + return 0 } -var xxx_messageInfo_DeleteResponse proto.InternalMessageInfo +func (x *DeleteResponse) GetExitedAt() *timestamppb.Timestamp { + if x != nil { + return x.ExitedAt + } + return nil +} type DeleteProcessRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` +} + +func (x *DeleteProcessRequest) Reset() { + *x = DeleteProcessRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteProcessRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *DeleteProcessRequest) Reset() { *m = DeleteProcessRequest{} } func (*DeleteProcessRequest) ProtoMessage() {} -func (*DeleteProcessRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{6} -} -func (m *DeleteProcessRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteProcessRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteProcessRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *DeleteProcessRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *DeleteProcessRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteProcessRequest.Merge(m, src) -} -func (m *DeleteProcessRequest) XXX_Size() int { - return m.Size() + +// Deprecated: Use DeleteProcessRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteProcessRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{6} } -func (m *DeleteProcessRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteProcessRequest.DiscardUnknown(m) + +func (x *DeleteProcessRequest) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" } -var xxx_messageInfo_DeleteProcessRequest proto.InternalMessageInfo +func (x *DeleteProcessRequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} type GetRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` +} + +func (x *GetRequest) Reset() { + *x = GetRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetRequest) Reset() { *m = GetRequest{} } func (*GetRequest) ProtoMessage() {} -func (*GetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{7} -} -func (m *GetRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *GetRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *GetRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetRequest.Merge(m, src) -} -func (m *GetRequest) XXX_Size() int { - return m.Size() + +// Deprecated: Use GetRequest.ProtoReflect.Descriptor instead. 
+func (*GetRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{7} } -func (m *GetRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetRequest.DiscardUnknown(m) + +func (x *GetRequest) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" } -var xxx_messageInfo_GetRequest proto.InternalMessageInfo +func (x *GetRequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} type GetResponse struct { - Process *task.Process `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Process *task.Process `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"` +} + +func (x *GetResponse) Reset() { + *x = GetResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *GetResponse) Reset() { *m = GetResponse{} } func (*GetResponse) ProtoMessage() {} -func (*GetResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{8} -} -func (m *GetResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *GetResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *GetResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetResponse.Merge(m, src) + +// Deprecated: Use GetResponse.ProtoReflect.Descriptor instead. 
+func (*GetResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{8} } -func (m *GetResponse) XXX_Size() int { - return m.Size() + +func (x *GetResponse) GetProcess() *task.Process { + if x != nil { + return x.Process + } + return nil } -func (m *GetResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetResponse.DiscardUnknown(m) + +type ListTasksRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` } -var xxx_messageInfo_GetResponse proto.InternalMessageInfo +func (x *ListTasksRequest) Reset() { + *x = ListTasksRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type ListTasksRequest struct { - Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *ListTasksRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ListTasksRequest) Reset() { *m = ListTasksRequest{} } func (*ListTasksRequest) ProtoMessage() {} -func (*ListTasksRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{9} -} -func (m *ListTasksRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListTasksRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListTasksRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *ListTasksRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *ListTasksRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListTasksRequest.Merge(m, src) + +// Deprecated: Use ListTasksRequest.ProtoReflect.Descriptor instead. 
+func (*ListTasksRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{9} } -func (m *ListTasksRequest) XXX_Size() int { - return m.Size() + +func (x *ListTasksRequest) GetFilter() string { + if x != nil { + return x.Filter + } + return "" } -func (m *ListTasksRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListTasksRequest.DiscardUnknown(m) + +type ListTasksResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Tasks []*task.Process `protobuf:"bytes,1,rep,name=tasks,proto3" json:"tasks,omitempty"` } -var xxx_messageInfo_ListTasksRequest proto.InternalMessageInfo +func (x *ListTasksResponse) Reset() { + *x = ListTasksResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type ListTasksResponse struct { - Tasks []*task.Process `protobuf:"bytes,1,rep,name=tasks,proto3" json:"tasks,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *ListTasksResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ListTasksResponse) Reset() { *m = ListTasksResponse{} } func (*ListTasksResponse) ProtoMessage() {} -func (*ListTasksResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{10} -} -func (m *ListTasksResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListTasksResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListTasksResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *ListTasksResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *ListTasksResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListTasksResponse.Merge(m, src) + +// Deprecated: Use ListTasksResponse.ProtoReflect.Descriptor instead. 
+func (*ListTasksResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{10} } -func (m *ListTasksResponse) XXX_Size() int { - return m.Size() + +func (x *ListTasksResponse) GetTasks() []*task.Process { + if x != nil { + return x.Tasks + } + return nil } -func (m *ListTasksResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListTasksResponse.DiscardUnknown(m) + +type KillRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + Signal uint32 `protobuf:"varint,3,opt,name=signal,proto3" json:"signal,omitempty"` + All bool `protobuf:"varint,4,opt,name=all,proto3" json:"all,omitempty"` } -var xxx_messageInfo_ListTasksResponse proto.InternalMessageInfo +func (x *KillRequest) Reset() { + *x = KillRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type KillRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - Signal uint32 `protobuf:"varint,3,opt,name=signal,proto3" json:"signal,omitempty"` - All bool `protobuf:"varint,4,opt,name=all,proto3" json:"all,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *KillRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *KillRequest) Reset() { *m = KillRequest{} } func (*KillRequest) ProtoMessage() {} -func (*KillRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{11} -} -func (m *KillRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *KillRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_KillRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (x *KillRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) +} + +// Deprecated: Use KillRequest.ProtoReflect.Descriptor instead. 
+func (*KillRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{11} } -func (m *KillRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_KillRequest.Merge(m, src) + +func (x *KillRequest) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" } -func (m *KillRequest) XXX_Size() int { - return m.Size() + +func (x *KillRequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" } -func (m *KillRequest) XXX_DiscardUnknown() { - xxx_messageInfo_KillRequest.DiscardUnknown(m) + +func (x *KillRequest) GetSignal() uint32 { + if x != nil { + return x.Signal + } + return 0 } -var xxx_messageInfo_KillRequest proto.InternalMessageInfo +func (x *KillRequest) GetAll() bool { + if x != nil { + return x.All + } + return false +} type ExecProcessRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` Stdin string `protobuf:"bytes,2,opt,name=stdin,proto3" json:"stdin,omitempty"` Stdout string `protobuf:"bytes,3,opt,name=stdout,proto3" json:"stdout,omitempty"` @@ -537,6975 +767,1593 @@ type ExecProcessRequest struct { // Spec for starting a process in the target container. // // For runc, this is a process spec, for example. - Spec *types1.Any `protobuf:"bytes,6,opt,name=spec,proto3" json:"spec,omitempty"` + Spec *anypb.Any `protobuf:"bytes,6,opt,name=spec,proto3" json:"spec,omitempty"` // id of the exec process - ExecID string `protobuf:"bytes,7,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ExecID string `protobuf:"bytes,7,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` } -func (m *ExecProcessRequest) Reset() { *m = ExecProcessRequest{} } -func (*ExecProcessRequest) ProtoMessage() {} -func (*ExecProcessRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{12} -} -func (m *ExecProcessRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExecProcessRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExecProcessRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ExecProcessRequest) Reset() { + *x = ExecProcessRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ExecProcessRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExecProcessRequest.Merge(m, src) -} -func (m *ExecProcessRequest) XXX_Size() int { - return m.Size() -} -func (m *ExecProcessRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ExecProcessRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ExecProcessRequest proto.InternalMessageInfo -type ExecProcessResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *ExecProcessRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ExecProcessResponse) Reset() { *m = ExecProcessResponse{} } -func (*ExecProcessResponse) 
ProtoMessage() {} -func (*ExecProcessResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{13} -} -func (m *ExecProcessResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExecProcessResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExecProcessResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*ExecProcessRequest) ProtoMessage() {} + +func (x *ExecProcessRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *ExecProcessResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExecProcessResponse.Merge(m, src) + +// Deprecated: Use ExecProcessRequest.ProtoReflect.Descriptor instead. +func (*ExecProcessRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{12} } -func (m *ExecProcessResponse) XXX_Size() int { - return m.Size() + +func (x *ExecProcessRequest) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" } -func (m *ExecProcessResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ExecProcessResponse.DiscardUnknown(m) + +func (x *ExecProcessRequest) GetStdin() string { + if x != nil { + return x.Stdin + } + return "" } -var xxx_messageInfo_ExecProcessResponse proto.InternalMessageInfo +func (x *ExecProcessRequest) GetStdout() string { + if x != nil { + return x.Stdout + } + return "" +} -type ResizePtyRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - Width uint32 `protobuf:"varint,3,opt,name=width,proto3" json:"width,omitempty"` - Height uint32 `protobuf:"varint,4,opt,name=height,proto3" json:"height,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *ExecProcessRequest) GetStderr() string { + if x != nil { + return x.Stderr + } + return "" } -func (m *ResizePtyRequest) Reset() { *m = ResizePtyRequest{} } -func (*ResizePtyRequest) ProtoMessage() {} -func (*ResizePtyRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{14} -} -func (m *ResizePtyRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResizePtyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResizePtyRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ExecProcessRequest) GetTerminal() bool { + if x != nil { + return x.Terminal } + return false } -func (m *ResizePtyRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResizePtyRequest.Merge(m, src) + +func (x *ExecProcessRequest) GetSpec() *anypb.Any { + if x != nil { + return x.Spec + } + return nil } -func (m *ResizePtyRequest) XXX_Size() int { - return m.Size() + +func (x *ExecProcessRequest) GetExecID() string { + if x != 
nil { + return x.ExecID + } + return "" } -func (m *ResizePtyRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ResizePtyRequest.DiscardUnknown(m) + +type ExecProcessResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -var xxx_messageInfo_ResizePtyRequest proto.InternalMessageInfo +func (x *ExecProcessResponse) Reset() { + *x = ExecProcessResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type CloseIORequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - Stdin bool `protobuf:"varint,3,opt,name=stdin,proto3" json:"stdin,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *ExecProcessResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *CloseIORequest) Reset() { *m = CloseIORequest{} } -func (*CloseIORequest) ProtoMessage() {} -func (*CloseIORequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{15} -} -func (m *CloseIORequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CloseIORequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CloseIORequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*ExecProcessResponse) ProtoMessage() {} + +func (x *ExecProcessResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *CloseIORequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CloseIORequest.Merge(m, src) -} -func (m *CloseIORequest) XXX_Size() int { - return m.Size() -} -func (m *CloseIORequest) XXX_DiscardUnknown() { - xxx_messageInfo_CloseIORequest.DiscardUnknown(m) + +// Deprecated: Use ExecProcessResponse.ProtoReflect.Descriptor instead. 
+func (*ExecProcessResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{13} } -var xxx_messageInfo_CloseIORequest proto.InternalMessageInfo +type ResizePtyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -type PauseTaskRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + Width uint32 `protobuf:"varint,3,opt,name=width,proto3" json:"width,omitempty"` + Height uint32 `protobuf:"varint,4,opt,name=height,proto3" json:"height,omitempty"` } -func (m *PauseTaskRequest) Reset() { *m = PauseTaskRequest{} } -func (*PauseTaskRequest) ProtoMessage() {} -func (*PauseTaskRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{16} -} -func (m *PauseTaskRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PauseTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PauseTaskRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ResizePtyRequest) Reset() { + *x = ResizePtyRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *PauseTaskRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PauseTaskRequest.Merge(m, src) -} -func (m *PauseTaskRequest) XXX_Size() int { - return m.Size() -} -func (m *PauseTaskRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PauseTaskRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_PauseTaskRequest proto.InternalMessageInfo -type ResumeTaskRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *ResizePtyRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ResumeTaskRequest) Reset() { *m = ResumeTaskRequest{} } -func (*ResumeTaskRequest) ProtoMessage() {} -func (*ResumeTaskRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{17} -} -func (m *ResumeTaskRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResumeTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResumeTaskRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*ResizePtyRequest) ProtoMessage() {} + +func (x *ResizePtyRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + 
ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *ResumeTaskRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResumeTaskRequest.Merge(m, src) -} -func (m *ResumeTaskRequest) XXX_Size() int { - return m.Size() -} -func (m *ResumeTaskRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ResumeTaskRequest.DiscardUnknown(m) + return mi.MessageOf(x) } -var xxx_messageInfo_ResumeTaskRequest proto.InternalMessageInfo - -type ListPidsRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +// Deprecated: Use ResizePtyRequest.ProtoReflect.Descriptor instead. +func (*ResizePtyRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{14} } -func (m *ListPidsRequest) Reset() { *m = ListPidsRequest{} } -func (*ListPidsRequest) ProtoMessage() {} -func (*ListPidsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{18} -} -func (m *ListPidsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListPidsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListPidsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ResizePtyRequest) GetContainerID() string { + if x != nil { + return x.ContainerID } + return "" } -func (m *ListPidsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListPidsRequest.Merge(m, src) + +func (x *ResizePtyRequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" } -func (m *ListPidsRequest) XXX_Size() int { - return m.Size() + +func (x *ResizePtyRequest) GetWidth() uint32 { + if x != nil { + return x.Width + } + return 0 } -func (m *ListPidsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListPidsRequest.DiscardUnknown(m) + +func (x *ResizePtyRequest) GetHeight() uint32 { + if x != nil { + return x.Height + } + return 0 } -var xxx_messageInfo_ListPidsRequest proto.InternalMessageInfo +type CloseIORequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -type ListPidsResponse struct { - // Processes includes the process ID and additional process information - Processes []*task.ProcessInfo `protobuf:"bytes,1,rep,name=processes,proto3" json:"processes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` + Stdin bool `protobuf:"varint,3,opt,name=stdin,proto3" json:"stdin,omitempty"` } -func (m *ListPidsResponse) Reset() { *m = ListPidsResponse{} } -func (*ListPidsResponse) ProtoMessage() {} -func (*ListPidsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{19} -} -func (m *ListPidsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListPidsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListPidsResponse.Marshal(b, m, deterministic) - } else { - b = 
b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *CloseIORequest) Reset() { + *x = CloseIORequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ListPidsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListPidsResponse.Merge(m, src) -} -func (m *ListPidsResponse) XXX_Size() int { - return m.Size() -} -func (m *ListPidsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListPidsResponse.DiscardUnknown(m) -} -var xxx_messageInfo_ListPidsResponse proto.InternalMessageInfo - -type CheckpointTaskRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ParentCheckpoint github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,opt,name=parent_checkpoint,json=parentCheckpoint,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"parent_checkpoint"` - Options *types1.Any `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *CloseIORequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *CheckpointTaskRequest) Reset() { *m = CheckpointTaskRequest{} } -func (*CheckpointTaskRequest) ProtoMessage() {} -func (*CheckpointTaskRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{20} -} -func (m *CheckpointTaskRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CheckpointTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CheckpointTaskRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*CloseIORequest) ProtoMessage() {} + +func (x *CloseIORequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *CheckpointTaskRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CheckpointTaskRequest.Merge(m, src) -} -func (m *CheckpointTaskRequest) XXX_Size() int { - return m.Size() -} -func (m *CheckpointTaskRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CheckpointTaskRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CheckpointTaskRequest proto.InternalMessageInfo -type CheckpointTaskResponse struct { - Descriptors []*types.Descriptor `protobuf:"bytes,1,rep,name=descriptors,proto3" json:"descriptors,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +// Deprecated: Use CloseIORequest.ProtoReflect.Descriptor instead. 
+func (*CloseIORequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{15} } -func (m *CheckpointTaskResponse) Reset() { *m = CheckpointTaskResponse{} } -func (*CheckpointTaskResponse) ProtoMessage() {} -func (*CheckpointTaskResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{21} -} -func (m *CheckpointTaskResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CheckpointTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CheckpointTaskResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *CloseIORequest) GetContainerID() string { + if x != nil { + return x.ContainerID } + return "" } -func (m *CheckpointTaskResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CheckpointTaskResponse.Merge(m, src) + +func (x *CloseIORequest) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" } -func (m *CheckpointTaskResponse) XXX_Size() int { - return m.Size() + +func (x *CloseIORequest) GetStdin() bool { + if x != nil { + return x.Stdin + } + return false } -func (m *CheckpointTaskResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CheckpointTaskResponse.DiscardUnknown(m) + +type PauseTaskRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` } -var xxx_messageInfo_CheckpointTaskResponse proto.InternalMessageInfo +func (x *PauseTaskRequest) Reset() { + *x = PauseTaskRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type UpdateTaskRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - Resources *types1.Any `protobuf:"bytes,2,opt,name=resources,proto3" json:"resources,omitempty"` - Annotations map[string]string `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *PauseTaskRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *UpdateTaskRequest) Reset() { *m = UpdateTaskRequest{} } -func (*UpdateTaskRequest) ProtoMessage() {} -func (*UpdateTaskRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{22} -} -func (m *UpdateTaskRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UpdateTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UpdateTaskRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*PauseTaskRequest) ProtoMessage() {} + +func (x *PauseTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *UpdateTaskRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateTaskRequest.Merge(m, src) + +// Deprecated: Use PauseTaskRequest.ProtoReflect.Descriptor instead. +func (*PauseTaskRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{16} } -func (m *UpdateTaskRequest) XXX_Size() int { - return m.Size() + +func (x *PauseTaskRequest) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" } -func (m *UpdateTaskRequest) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateTaskRequest.DiscardUnknown(m) + +type ResumeTaskRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` } -var xxx_messageInfo_UpdateTaskRequest proto.InternalMessageInfo +func (x *ResumeTaskRequest) Reset() { + *x = ResumeTaskRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type MetricsRequest struct { - Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *ResumeTaskRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *MetricsRequest) Reset() { *m = MetricsRequest{} } -func (*MetricsRequest) ProtoMessage() {} -func (*MetricsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{23} -} -func (m *MetricsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MetricsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*ResumeTaskRequest) ProtoMessage() {} + +func (x *ResumeTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *MetricsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricsRequest.Merge(m, src) + +// Deprecated: Use ResumeTaskRequest.ProtoReflect.Descriptor instead. 
+func (*ResumeTaskRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{17} } -func (m *MetricsRequest) XXX_Size() int { - return m.Size() + +func (x *ResumeTaskRequest) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" } -func (m *MetricsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_MetricsRequest.DiscardUnknown(m) + +type ListPidsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` } -var xxx_messageInfo_MetricsRequest proto.InternalMessageInfo +func (x *ListPidsRequest) Reset() { + *x = ListPidsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type MetricsResponse struct { - Metrics []*types.Metric `protobuf:"bytes,1,rep,name=metrics,proto3" json:"metrics,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *ListPidsRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *MetricsResponse) Reset() { *m = MetricsResponse{} } -func (*MetricsResponse) ProtoMessage() {} -func (*MetricsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{24} -} -func (m *MetricsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MetricsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*ListPidsRequest) ProtoMessage() {} + +func (x *ListPidsRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *MetricsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricsResponse.Merge(m, src) + +// Deprecated: Use ListPidsRequest.ProtoReflect.Descriptor instead. 
+func (*ListPidsRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{18} } -func (m *MetricsResponse) XXX_Size() int { - return m.Size() + +func (x *ListPidsRequest) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" } -func (m *MetricsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MetricsResponse.DiscardUnknown(m) + +type ListPidsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Processes includes the process ID and additional process information + Processes []*task.ProcessInfo `protobuf:"bytes,1,rep,name=processes,proto3" json:"processes,omitempty"` } -var xxx_messageInfo_MetricsResponse proto.InternalMessageInfo +func (x *ListPidsResponse) Reset() { + *x = ListPidsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type WaitRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *ListPidsResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *WaitRequest) Reset() { *m = WaitRequest{} } -func (*WaitRequest) ProtoMessage() {} -func (*WaitRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{25} -} -func (m *WaitRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WaitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WaitRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*ListPidsResponse) ProtoMessage() {} + +func (x *ListPidsResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *WaitRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WaitRequest.Merge(m, src) + +// Deprecated: Use ListPidsResponse.ProtoReflect.Descriptor instead. 
+func (*ListPidsResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{19} } -func (m *WaitRequest) XXX_Size() int { - return m.Size() + +func (x *ListPidsResponse) GetProcesses() []*task.ProcessInfo { + if x != nil { + return x.Processes + } + return nil } -func (m *WaitRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WaitRequest.DiscardUnknown(m) + +type CheckpointTaskRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ParentCheckpoint string `protobuf:"bytes,2,opt,name=parent_checkpoint,json=parentCheckpoint,proto3" json:"parent_checkpoint,omitempty"` + Options *anypb.Any `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"` } -var xxx_messageInfo_WaitRequest proto.InternalMessageInfo +func (x *CheckpointTaskRequest) Reset() { + *x = CheckpointTaskRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} -type WaitResponse struct { - ExitStatus uint32 `protobuf:"varint,1,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` - ExitedAt time.Time `protobuf:"bytes,2,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *CheckpointTaskRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *WaitResponse) Reset() { *m = WaitResponse{} } -func (*WaitResponse) ProtoMessage() {} -func (*WaitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_310e7127b8a26f14, []int{26} -} -func (m *WaitResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WaitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WaitResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*CheckpointTaskRequest) ProtoMessage() {} + +func (x *CheckpointTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *WaitResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_WaitResponse.Merge(m, src) -} -func (m *WaitResponse) XXX_Size() int { - return m.Size() -} -func (m *WaitResponse) XXX_DiscardUnknown() { - xxx_messageInfo_WaitResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_WaitResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*CreateTaskRequest)(nil), "containerd.services.tasks.v1.CreateTaskRequest") - proto.RegisterType((*CreateTaskResponse)(nil), "containerd.services.tasks.v1.CreateTaskResponse") - proto.RegisterType((*StartRequest)(nil), "containerd.services.tasks.v1.StartRequest") - proto.RegisterType((*StartResponse)(nil), "containerd.services.tasks.v1.StartResponse") - proto.RegisterType((*DeleteTaskRequest)(nil), "containerd.services.tasks.v1.DeleteTaskRequest") 
- proto.RegisterType((*DeleteResponse)(nil), "containerd.services.tasks.v1.DeleteResponse") - proto.RegisterType((*DeleteProcessRequest)(nil), "containerd.services.tasks.v1.DeleteProcessRequest") - proto.RegisterType((*GetRequest)(nil), "containerd.services.tasks.v1.GetRequest") - proto.RegisterType((*GetResponse)(nil), "containerd.services.tasks.v1.GetResponse") - proto.RegisterType((*ListTasksRequest)(nil), "containerd.services.tasks.v1.ListTasksRequest") - proto.RegisterType((*ListTasksResponse)(nil), "containerd.services.tasks.v1.ListTasksResponse") - proto.RegisterType((*KillRequest)(nil), "containerd.services.tasks.v1.KillRequest") - proto.RegisterType((*ExecProcessRequest)(nil), "containerd.services.tasks.v1.ExecProcessRequest") - proto.RegisterType((*ExecProcessResponse)(nil), "containerd.services.tasks.v1.ExecProcessResponse") - proto.RegisterType((*ResizePtyRequest)(nil), "containerd.services.tasks.v1.ResizePtyRequest") - proto.RegisterType((*CloseIORequest)(nil), "containerd.services.tasks.v1.CloseIORequest") - proto.RegisterType((*PauseTaskRequest)(nil), "containerd.services.tasks.v1.PauseTaskRequest") - proto.RegisterType((*ResumeTaskRequest)(nil), "containerd.services.tasks.v1.ResumeTaskRequest") - proto.RegisterType((*ListPidsRequest)(nil), "containerd.services.tasks.v1.ListPidsRequest") - proto.RegisterType((*ListPidsResponse)(nil), "containerd.services.tasks.v1.ListPidsResponse") - proto.RegisterType((*CheckpointTaskRequest)(nil), "containerd.services.tasks.v1.CheckpointTaskRequest") - proto.RegisterType((*CheckpointTaskResponse)(nil), "containerd.services.tasks.v1.CheckpointTaskResponse") - proto.RegisterType((*UpdateTaskRequest)(nil), "containerd.services.tasks.v1.UpdateTaskRequest") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.tasks.v1.UpdateTaskRequest.AnnotationsEntry") - proto.RegisterType((*MetricsRequest)(nil), "containerd.services.tasks.v1.MetricsRequest") - proto.RegisterType((*MetricsResponse)(nil), "containerd.services.tasks.v1.MetricsResponse") - proto.RegisterType((*WaitRequest)(nil), "containerd.services.tasks.v1.WaitRequest") - proto.RegisterType((*WaitResponse)(nil), "containerd.services.tasks.v1.WaitResponse") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/tasks/v1/tasks.proto", fileDescriptor_310e7127b8a26f14) -} - -var fileDescriptor_310e7127b8a26f14 = []byte{ - // 1400 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x5b, 0x6f, 0x1b, 0x45, - 0x14, 0xee, 0xfa, 0xee, 0xe3, 0xa4, 0x4d, 0x96, 0x34, 0x98, 0xa5, 0x8a, 0xd3, 0xe5, 0xc5, 0x04, - 0xba, 0xa6, 0x2e, 0xaa, 0xaa, 0xb6, 0xaa, 0xc8, 0x8d, 0xc8, 0x82, 0xaa, 0xe9, 0xb6, 0x40, 0x55, - 0x09, 0x99, 0x8d, 0x77, 0x62, 0x8f, 0x62, 0xef, 0x6c, 0x77, 0xc6, 0x69, 0xcd, 0x0b, 0xfc, 0x84, - 0xbe, 0xf2, 0x02, 0x7f, 0xa7, 0x8f, 0x3c, 0x22, 0x54, 0x05, 0xea, 0x57, 0x7e, 0x01, 0x6f, 0x68, - 0x2e, 0xbb, 0xde, 0xd8, 0xf1, 0x25, 0x4d, 0xc3, 0x4b, 0x3b, 0x33, 0x7b, 0xce, 0x99, 0x33, 0xdf, - 0xb9, 0x7d, 0x0e, 0x6c, 0x34, 0x31, 0x6b, 0x75, 0xf7, 0xac, 0x06, 0xe9, 0x54, 0x1a, 0xc4, 0x63, - 0x0e, 0xf6, 0x50, 0xe0, 0xc6, 0x97, 0x8e, 0x8f, 0x2b, 0x14, 0x05, 0x87, 0xb8, 0x81, 0x68, 0x85, - 0x39, 0xf4, 0x80, 0x56, 0x0e, 0xaf, 0xcb, 0x85, 0xe5, 0x07, 0x84, 0x11, 0xfd, 0xca, 0x40, 0xda, - 0x0a, 0x25, 0x2d, 0x29, 0x70, 0x78, 0xdd, 0xf8, 0xb0, 0x49, 0x48, 0xb3, 0x8d, 0x2a, 0x42, 0x76, - 0xaf, 0xbb, 0x5f, 0x41, 0x1d, 0x9f, 0xf5, 0xa4, 0xaa, 0xf1, 0xc1, 0xf0, 0x47, 0xc7, 0x0b, 0x3f, - 0x2d, 0x35, 0x49, 
0x93, 0x88, 0x65, 0x85, 0xaf, 0xd4, 0xe9, 0xcd, 0x99, 0xfc, 0x65, 0x3d, 0x1f, - 0xd1, 0x4a, 0x87, 0x74, 0x3d, 0xa6, 0xf4, 0x6e, 0x9d, 0x46, 0x0f, 0xb1, 0x00, 0x37, 0xd4, 0xeb, - 0x8c, 0x3b, 0xa7, 0xd0, 0x74, 0x11, 0x6d, 0x04, 0xd8, 0x67, 0x24, 0x50, 0xca, 0xb7, 0x4f, 0xa1, - 0xcc, 0x11, 0x13, 0xff, 0x28, 0xdd, 0xd2, 0x30, 0x36, 0x0c, 0x77, 0x10, 0x65, 0x4e, 0xc7, 0x97, - 0x02, 0xe6, 0x3f, 0x09, 0x58, 0xdc, 0x0c, 0x90, 0xc3, 0xd0, 0x63, 0x87, 0x1e, 0xd8, 0xe8, 0x59, - 0x17, 0x51, 0xa6, 0x57, 0x61, 0x2e, 0x32, 0x5f, 0xc7, 0x6e, 0x51, 0x5b, 0xd5, 0xca, 0xf9, 0x8d, - 0x4b, 0xfd, 0xa3, 0x52, 0x61, 0x33, 0x3c, 0xaf, 0x6d, 0xd9, 0x85, 0x48, 0xa8, 0xe6, 0xea, 0x15, - 0xc8, 0x04, 0x84, 0xb0, 0x7d, 0x5a, 0x4c, 0xae, 0x26, 0xcb, 0x85, 0xea, 0xfb, 0x56, 0x2c, 0xa4, - 0xc2, 0x3b, 0xeb, 0x3e, 0x07, 0xd3, 0x56, 0x62, 0xfa, 0x12, 0xa4, 0x29, 0x73, 0xb1, 0x57, 0x4c, - 0x71, 0xeb, 0xb6, 0xdc, 0xe8, 0xcb, 0x90, 0xa1, 0xcc, 0x25, 0x5d, 0x56, 0x4c, 0x8b, 0x63, 0xb5, - 0x53, 0xe7, 0x28, 0x08, 0x8a, 0x99, 0xe8, 0x1c, 0x05, 0x81, 0x6e, 0x40, 0x8e, 0xa1, 0xa0, 0x83, - 0x3d, 0xa7, 0x5d, 0xcc, 0xae, 0x6a, 0xe5, 0x9c, 0x1d, 0xed, 0xf5, 0xbb, 0x00, 0x8d, 0x16, 0x6a, - 0x1c, 0xf8, 0x04, 0x7b, 0xac, 0x98, 0x5b, 0xd5, 0xca, 0x85, 0xea, 0x95, 0x51, 0xb7, 0xb6, 0x22, - 0xc4, 0xed, 0x98, 0xbc, 0x6e, 0x41, 0x96, 0xf8, 0x0c, 0x13, 0x8f, 0x16, 0xf3, 0x42, 0x75, 0xc9, - 0x92, 0x68, 0x5a, 0x21, 0x9a, 0xd6, 0xba, 0xd7, 0xb3, 0x43, 0x21, 0xfd, 0x2a, 0xcc, 0x05, 0x5d, - 0x8f, 0x03, 0x5c, 0xf7, 0x1d, 0xd6, 0x2a, 0x82, 0xf0, 0xb3, 0xa0, 0xce, 0x76, 0x1d, 0xd6, 0x32, - 0x9f, 0x82, 0x1e, 0x07, 0x9b, 0xfa, 0xc4, 0xa3, 0xe8, 0xad, 0xd0, 0x5e, 0x80, 0xa4, 0x8f, 0xdd, - 0x62, 0x62, 0x55, 0x2b, 0xcf, 0xdb, 0x7c, 0x69, 0x36, 0x61, 0xee, 0x11, 0x73, 0x02, 0x76, 0x96, - 0x18, 0x7e, 0x04, 0x59, 0xf4, 0x02, 0x35, 0xea, 0xca, 0x72, 0x7e, 0x03, 0xfa, 0x47, 0xa5, 0xcc, - 0xf6, 0x0b, 0xd4, 0xa8, 0x6d, 0xd9, 0x19, 0xfe, 0xa9, 0xe6, 0x9a, 0x57, 0x61, 0x5e, 0x5d, 0xa4, - 0xfc, 0x57, 0xbe, 0x68, 0x03, 0x5f, 0x76, 0x60, 0x71, 0x0b, 0xb5, 0xd1, 0x99, 0x93, 0xca, 0xfc, - 0x55, 0x83, 0x8b, 0xd2, 0x52, 0x74, 0xdb, 0x32, 0x24, 0x22, 0xe5, 0x4c, 0xff, 0xa8, 0x94, 0xa8, - 0x6d, 0xd9, 0x09, 0x7c, 0x02, 0x22, 0x7a, 0x09, 0x0a, 0xe8, 0x05, 0x66, 0x75, 0xca, 0x1c, 0xd6, - 0xe5, 0x69, 0xc9, 0xbf, 0x00, 0x3f, 0x7a, 0x24, 0x4e, 0xf4, 0x75, 0xc8, 0xf3, 0x1d, 0x72, 0xeb, - 0x0e, 0x13, 0x59, 0x58, 0xa8, 0x1a, 0x23, 0x31, 0x7e, 0x1c, 0x56, 0xcc, 0x46, 0xee, 0xd5, 0x51, - 0xe9, 0xc2, 0xcb, 0xbf, 0x4a, 0x9a, 0x9d, 0x93, 0x6a, 0xeb, 0xcc, 0x24, 0xb0, 0x24, 0xfd, 0xdb, - 0x0d, 0x48, 0x03, 0x51, 0x7a, 0xee, 0xe8, 0x23, 0x80, 0x1d, 0x74, 0xfe, 0x41, 0xde, 0x86, 0x82, - 0xb8, 0x46, 0x81, 0x7e, 0x13, 0xb2, 0xbe, 0x7c, 0xa0, 0xb8, 0x62, 0xa8, 0x8c, 0x0e, 0xaf, 0xab, - 0x4a, 0x0a, 0x41, 0x08, 0x85, 0xcd, 0x35, 0x58, 0xf8, 0x1a, 0x53, 0xc6, 0xd3, 0x20, 0x82, 0x66, - 0x19, 0x32, 0xfb, 0xb8, 0xcd, 0x50, 0x20, 0xbd, 0xb5, 0xd5, 0x8e, 0x27, 0x4d, 0x4c, 0x36, 0xaa, - 0x8d, 0xb4, 0x98, 0x02, 0x45, 0x4d, 0x34, 0x95, 0xc9, 0xd7, 0x4a, 0x51, 0xf3, 0xa5, 0x06, 0x85, - 0xaf, 0x70, 0xbb, 0x7d, 0xde, 0x20, 0x89, 0x9e, 0x84, 0x9b, 0xbc, 0xf3, 0xc8, 0xdc, 0x52, 0x3b, - 0x9e, 0x8a, 0x4e, 0xbb, 0x2d, 0x32, 0x2a, 0x67, 0xf3, 0xa5, 0xf9, 0xaf, 0x06, 0x3a, 0x57, 0x7e, - 0x07, 0x59, 0x12, 0xb5, 0xcd, 0xc4, 0xc9, 0x6d, 0x33, 0x39, 0xa6, 0x6d, 0xa6, 0xc6, 0xb6, 0xcd, - 0xf4, 0x50, 0xdb, 0x2c, 0x43, 0x8a, 0xfa, 0xa8, 0x21, 0x1a, 0xed, 0xb8, 0xae, 0x27, 0x24, 0xe2, - 0x28, 0x65, 0xc7, 0xa6, 0xd2, 0x65, 0x78, 0xef, 0xd8, 0xd3, 0x65, 0x64, 0xcd, 0x5f, 0x34, 0x58, - 0xb0, 0x11, 0xc5, 0x3f, 0xa2, 0x5d, 0xd6, 
0x3b, 0xf7, 0x50, 0x2d, 0x41, 0xfa, 0x39, 0x76, 0x59, - 0x4b, 0x45, 0x4a, 0x6e, 0x38, 0x3a, 0x2d, 0x84, 0x9b, 0x2d, 0x59, 0xfd, 0xf3, 0xb6, 0xda, 0x99, - 0x3f, 0xc1, 0xc5, 0xcd, 0x36, 0xa1, 0xa8, 0xf6, 0xe0, 0xff, 0x70, 0x4c, 0x86, 0x33, 0x29, 0xa2, - 0x20, 0x37, 0xe6, 0x97, 0xb0, 0xb0, 0xeb, 0x74, 0xe9, 0x99, 0xfb, 0xe7, 0x0e, 0x2c, 0xda, 0x88, - 0x76, 0x3b, 0x67, 0x36, 0xb4, 0x0d, 0x97, 0x78, 0x71, 0xee, 0x62, 0xf7, 0x2c, 0xc9, 0x6b, 0xda, - 0xb2, 0x1f, 0x48, 0x33, 0xaa, 0xc4, 0xef, 0x41, 0x5e, 0xb5, 0x0b, 0x14, 0x96, 0xf9, 0xea, 0xa4, - 0x32, 0xaf, 0x79, 0xfb, 0xc4, 0x1e, 0xa8, 0x98, 0xaf, 0x35, 0xb8, 0xbc, 0x19, 0x8d, 0xed, 0xb3, - 0xd2, 0x98, 0x3a, 0x2c, 0xfa, 0x4e, 0x80, 0x3c, 0x56, 0x8f, 0x51, 0x07, 0x19, 0xbe, 0x2a, 0xef, - 0xff, 0x7f, 0x1e, 0x95, 0xd6, 0x62, 0x84, 0x8c, 0xf8, 0xc8, 0x8b, 0xd4, 0x69, 0xa5, 0x49, 0xae, - 0xb9, 0xb8, 0x89, 0x28, 0xb3, 0xb6, 0xc4, 0x7f, 0xf6, 0x82, 0x34, 0xb6, 0x79, 0x22, 0xad, 0x48, - 0xce, 0x40, 0x2b, 0xcc, 0x27, 0xb0, 0x3c, 0xfc, 0xba, 0x08, 0xb8, 0xc2, 0x80, 0x2c, 0x9e, 0xd8, - 0x21, 0x47, 0xf8, 0x4d, 0x5c, 0xc1, 0xfc, 0x2d, 0x01, 0x8b, 0xdf, 0xf8, 0xee, 0x3b, 0xe0, 0x7e, - 0x55, 0xc8, 0x07, 0x88, 0x92, 0x6e, 0xd0, 0x40, 0x54, 0x80, 0x35, 0xee, 0x55, 0x03, 0x31, 0x7d, - 0x0f, 0x0a, 0x8e, 0xe7, 0x11, 0xe6, 0x84, 0x58, 0x70, 0xef, 0xbf, 0xb0, 0x26, 0xfd, 0x0e, 0xb0, - 0x46, 0xbc, 0xb5, 0xd6, 0x07, 0x26, 0xb6, 0x3d, 0x16, 0xf4, 0xec, 0xb8, 0x51, 0xe3, 0x1e, 0x2c, - 0x0c, 0x0b, 0xf0, 0xe6, 0x7c, 0x80, 0x7a, 0x6a, 0xf6, 0xf0, 0x25, 0x2f, 0xc1, 0x43, 0xa7, 0xdd, - 0x45, 0x61, 0x47, 0x15, 0x9b, 0xdb, 0x89, 0x5b, 0x9a, 0xb9, 0x06, 0x17, 0xef, 0x4b, 0x22, 0x1f, - 0xa2, 0x53, 0x84, 0xac, 0x1c, 0x57, 0x12, 0xef, 0xbc, 0x1d, 0x6e, 0x79, 0x85, 0x44, 0xb2, 0xd1, - 0xf0, 0xca, 0xaa, 0xdf, 0x01, 0x2a, 0x38, 0xc5, 0x13, 0x38, 0xb1, 0x10, 0xb0, 0x43, 0x41, 0x73, - 0x1f, 0x0a, 0xdf, 0x39, 0xf8, 0xfc, 0x07, 0x7c, 0x00, 0x73, 0xf2, 0x1e, 0xe5, 0xeb, 0x10, 0x59, - 0xd2, 0x26, 0x93, 0xa5, 0xc4, 0xdb, 0x90, 0xa5, 0xea, 0xeb, 0x39, 0x48, 0x8b, 0xf1, 0xae, 0x1f, - 0x40, 0x46, 0x12, 0x61, 0xbd, 0x32, 0x39, 0xe2, 0x23, 0xbf, 0x4d, 0x8c, 0xcf, 0x66, 0x57, 0x50, - 0x4f, 0xfb, 0x01, 0xd2, 0x82, 0xb0, 0xea, 0x6b, 0x93, 0x55, 0xe3, 0xf4, 0xd9, 0xf8, 0x64, 0x26, - 0x59, 0x75, 0x43, 0x13, 0x32, 0x92, 0x05, 0x4e, 0x7b, 0xce, 0x08, 0x2b, 0x36, 0x3e, 0x9d, 0x45, - 0x21, 0xba, 0xe8, 0x19, 0xcc, 0x1f, 0xa3, 0x9b, 0x7a, 0x75, 0x16, 0xf5, 0xe3, 0xac, 0xe3, 0x94, - 0x57, 0x3e, 0x85, 0xe4, 0x0e, 0x62, 0x7a, 0x79, 0xb2, 0xd2, 0x80, 0x93, 0x1a, 0x1f, 0xcf, 0x20, - 0x19, 0xe1, 0x96, 0xe2, 0xe3, 0x40, 0xb7, 0x26, 0xab, 0x0c, 0x53, 0x48, 0xa3, 0x32, 0xb3, 0xbc, - 0xba, 0xa8, 0x06, 0x29, 0xce, 0x08, 0xf5, 0x29, 0xbe, 0xc5, 0x58, 0xa3, 0xb1, 0x3c, 0x92, 0xdc, - 0xdb, 0x1d, 0x9f, 0xf5, 0xf4, 0x5d, 0x48, 0xf1, 0x52, 0xd2, 0xa7, 0xe4, 0xe1, 0x28, 0xdb, 0x1b, - 0x6b, 0xf1, 0x11, 0xe4, 0x23, 0x22, 0x34, 0x0d, 0x8a, 0x61, 0xc6, 0x34, 0xd6, 0xe8, 0x03, 0xc8, - 0x2a, 0x0a, 0xa3, 0x4f, 0x89, 0xf7, 0x71, 0xa6, 0x33, 0xc1, 0x60, 0x5a, 0x50, 0x92, 0x69, 0x1e, - 0x0e, 0xf3, 0x96, 0xb1, 0x06, 0x1f, 0x42, 0x46, 0x72, 0x93, 0x69, 0x45, 0x33, 0xc2, 0x60, 0xc6, - 0x9a, 0xc4, 0x90, 0x0b, 0xe9, 0x85, 0x7e, 0x6d, 0x7a, 0x8e, 0xc4, 0xd8, 0x8c, 0x61, 0xcd, 0x2a, - 0xae, 0x32, 0xea, 0x39, 0x40, 0x6c, 0xa8, 0xdf, 0x98, 0x02, 0xf1, 0x49, 0xf4, 0xc4, 0xf8, 0xfc, - 0x74, 0x4a, 0xea, 0xe2, 0x87, 0x90, 0x91, 0x63, 0x70, 0x1a, 0x6c, 0x23, 0xc3, 0x72, 0x2c, 0x6c, - 0xfb, 0x90, 0x55, 0xa3, 0x6b, 0x5a, 0xae, 0x1c, 0x9f, 0x86, 0xc6, 0xb5, 0x19, 0xa5, 0x95, 0xeb, - 0xdf, 0x43, 0x8a, 0xcf, 0x9c, 0x69, 0x55, 0x18, 0x9b, 0x7f, 0xc6, 
0xda, 0x2c, 0xa2, 0xd2, 0xfc, - 0xc6, 0xb7, 0xaf, 0xde, 0xac, 0x5c, 0xf8, 0xe3, 0xcd, 0xca, 0x85, 0x9f, 0xfb, 0x2b, 0xda, 0xab, - 0xfe, 0x8a, 0xf6, 0x7b, 0x7f, 0x45, 0xfb, 0xbb, 0xbf, 0xa2, 0x3d, 0xbd, 0xfb, 0x76, 0x7f, 0xa1, - 0xbc, 0x23, 0x16, 0x4f, 0x12, 0x7b, 0x19, 0x01, 0xd8, 0x8d, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, - 0xc7, 0x3c, 0xaa, 0x56, 0xea, 0x14, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// TasksClient is the client API for Tasks service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type TasksClient interface { - // Create a task. - Create(ctx context.Context, in *CreateTaskRequest, opts ...grpc.CallOption) (*CreateTaskResponse, error) - // Start a process. - Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) - // Delete a task and on disk state. - Delete(ctx context.Context, in *DeleteTaskRequest, opts ...grpc.CallOption) (*DeleteResponse, error) - DeleteProcess(ctx context.Context, in *DeleteProcessRequest, opts ...grpc.CallOption) (*DeleteResponse, error) - Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) - List(ctx context.Context, in *ListTasksRequest, opts ...grpc.CallOption) (*ListTasksResponse, error) - // Kill a task or process. - Kill(ctx context.Context, in *KillRequest, opts ...grpc.CallOption) (*types1.Empty, error) - Exec(ctx context.Context, in *ExecProcessRequest, opts ...grpc.CallOption) (*types1.Empty, error) - ResizePty(ctx context.Context, in *ResizePtyRequest, opts ...grpc.CallOption) (*types1.Empty, error) - CloseIO(ctx context.Context, in *CloseIORequest, opts ...grpc.CallOption) (*types1.Empty, error) - Pause(ctx context.Context, in *PauseTaskRequest, opts ...grpc.CallOption) (*types1.Empty, error) - Resume(ctx context.Context, in *ResumeTaskRequest, opts ...grpc.CallOption) (*types1.Empty, error) - ListPids(ctx context.Context, in *ListPidsRequest, opts ...grpc.CallOption) (*ListPidsResponse, error) - Checkpoint(ctx context.Context, in *CheckpointTaskRequest, opts ...grpc.CallOption) (*CheckpointTaskResponse, error) - Update(ctx context.Context, in *UpdateTaskRequest, opts ...grpc.CallOption) (*types1.Empty, error) - Metrics(ctx context.Context, in *MetricsRequest, opts ...grpc.CallOption) (*MetricsResponse, error) - Wait(ctx context.Context, in *WaitRequest, opts ...grpc.CallOption) (*WaitResponse, error) -} - -type tasksClient struct { - cc *grpc.ClientConn -} - -func NewTasksClient(cc *grpc.ClientConn) TasksClient { - return &tasksClient{cc} -} - -func (c *tasksClient) Create(ctx context.Context, in *CreateTaskRequest, opts ...grpc.CallOption) (*CreateTaskResponse, error) { - out := new(CreateTaskResponse) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Create", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil + +// Deprecated: Use CheckpointTaskRequest.ProtoReflect.Descriptor instead. 
+func (*CheckpointTaskRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{20} } -func (c *tasksClient) Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) { - out := new(StartResponse) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Start", in, out, opts...) - if err != nil { - return nil, err +func (x *CheckpointTaskRequest) GetContainerID() string { + if x != nil { + return x.ContainerID } - return out, nil + return "" } -func (c *tasksClient) Delete(ctx context.Context, in *DeleteTaskRequest, opts ...grpc.CallOption) (*DeleteResponse, error) { - out := new(DeleteResponse) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Delete", in, out, opts...) - if err != nil { - return nil, err +func (x *CheckpointTaskRequest) GetParentCheckpoint() string { + if x != nil { + return x.ParentCheckpoint } - return out, nil + return "" } -func (c *tasksClient) DeleteProcess(ctx context.Context, in *DeleteProcessRequest, opts ...grpc.CallOption) (*DeleteResponse, error) { - out := new(DeleteResponse) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/DeleteProcess", in, out, opts...) - if err != nil { - return nil, err +func (x *CheckpointTaskRequest) GetOptions() *anypb.Any { + if x != nil { + return x.Options } - return out, nil + return nil } -func (c *tasksClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) { - out := new(GetResponse) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Get", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +type CheckpointTaskResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Descriptors []*types.Descriptor `protobuf:"bytes,1,rep,name=descriptors,proto3" json:"descriptors,omitempty"` } -func (c *tasksClient) List(ctx context.Context, in *ListTasksRequest, opts ...grpc.CallOption) (*ListTasksResponse, error) { - out := new(ListTasksResponse) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/List", in, out, opts...) - if err != nil { - return nil, err +func (x *CheckpointTaskResponse) Reset() { + *x = CheckpointTaskResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return out, nil } -func (c *tasksClient) Kill(ctx context.Context, in *KillRequest, opts ...grpc.CallOption) (*types1.Empty, error) { - out := new(types1.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Kill", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +func (x *CheckpointTaskResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (c *tasksClient) Exec(ctx context.Context, in *ExecProcessRequest, opts ...grpc.CallOption) (*types1.Empty, error) { - out := new(types1.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Exec", in, out, opts...) 
- if err != nil { - return nil, err +func (*CheckpointTaskResponse) ProtoMessage() {} + +func (x *CheckpointTaskResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return out, nil + return mi.MessageOf(x) } -func (c *tasksClient) ResizePty(ctx context.Context, in *ResizePtyRequest, opts ...grpc.CallOption) (*types1.Empty, error) { - out := new(types1.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/ResizePty", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +// Deprecated: Use CheckpointTaskResponse.ProtoReflect.Descriptor instead. +func (*CheckpointTaskResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{21} } -func (c *tasksClient) CloseIO(ctx context.Context, in *CloseIORequest, opts ...grpc.CallOption) (*types1.Empty, error) { - out := new(types1.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/CloseIO", in, out, opts...) - if err != nil { - return nil, err +func (x *CheckpointTaskResponse) GetDescriptors() []*types.Descriptor { + if x != nil { + return x.Descriptors } - return out, nil + return nil } -func (c *tasksClient) Pause(ctx context.Context, in *PauseTaskRequest, opts ...grpc.CallOption) (*types1.Empty, error) { - out := new(types1.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Pause", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +type UpdateTaskRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + Resources *anypb.Any `protobuf:"bytes,2,opt,name=resources,proto3" json:"resources,omitempty"` + Annotations map[string]string `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (c *tasksClient) Resume(ctx context.Context, in *ResumeTaskRequest, opts ...grpc.CallOption) (*types1.Empty, error) { - out := new(types1.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Resume", in, out, opts...) - if err != nil { - return nil, err +func (x *UpdateTaskRequest) Reset() { + *x = UpdateTaskRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return out, nil } -func (c *tasksClient) ListPids(ctx context.Context, in *ListPidsRequest, opts ...grpc.CallOption) (*ListPidsResponse, error) { - out := new(ListPidsResponse) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/ListPids", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +func (x *UpdateTaskRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (c *tasksClient) Checkpoint(ctx context.Context, in *CheckpointTaskRequest, opts ...grpc.CallOption) (*CheckpointTaskResponse, error) { - out := new(CheckpointTaskResponse) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Checkpoint", in, out, opts...) 
- if err != nil { - return nil, err +func (*UpdateTaskRequest) ProtoMessage() {} + +func (x *UpdateTaskRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return out, nil + return mi.MessageOf(x) } -func (c *tasksClient) Update(ctx context.Context, in *UpdateTaskRequest, opts ...grpc.CallOption) (*types1.Empty, error) { - out := new(types1.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Update", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil +// Deprecated: Use UpdateTaskRequest.ProtoReflect.Descriptor instead. +func (*UpdateTaskRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{22} } -func (c *tasksClient) Metrics(ctx context.Context, in *MetricsRequest, opts ...grpc.CallOption) (*MetricsResponse, error) { - out := new(MetricsResponse) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Metrics", in, out, opts...) - if err != nil { - return nil, err +func (x *UpdateTaskRequest) GetContainerID() string { + if x != nil { + return x.ContainerID } - return out, nil + return "" } -func (c *tasksClient) Wait(ctx context.Context, in *WaitRequest, opts ...grpc.CallOption) (*WaitResponse, error) { - out := new(WaitResponse) - err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Wait", in, out, opts...) - if err != nil { - return nil, err +func (x *UpdateTaskRequest) GetResources() *anypb.Any { + if x != nil { + return x.Resources } - return out, nil + return nil } -// TasksServer is the server API for Tasks service. -type TasksServer interface { - // Create a task. - Create(context.Context, *CreateTaskRequest) (*CreateTaskResponse, error) - // Start a process. - Start(context.Context, *StartRequest) (*StartResponse, error) - // Delete a task and on disk state. - Delete(context.Context, *DeleteTaskRequest) (*DeleteResponse, error) - DeleteProcess(context.Context, *DeleteProcessRequest) (*DeleteResponse, error) - Get(context.Context, *GetRequest) (*GetResponse, error) - List(context.Context, *ListTasksRequest) (*ListTasksResponse, error) - // Kill a task or process. - Kill(context.Context, *KillRequest) (*types1.Empty, error) - Exec(context.Context, *ExecProcessRequest) (*types1.Empty, error) - ResizePty(context.Context, *ResizePtyRequest) (*types1.Empty, error) - CloseIO(context.Context, *CloseIORequest) (*types1.Empty, error) - Pause(context.Context, *PauseTaskRequest) (*types1.Empty, error) - Resume(context.Context, *ResumeTaskRequest) (*types1.Empty, error) - ListPids(context.Context, *ListPidsRequest) (*ListPidsResponse, error) - Checkpoint(context.Context, *CheckpointTaskRequest) (*CheckpointTaskResponse, error) - Update(context.Context, *UpdateTaskRequest) (*types1.Empty, error) - Metrics(context.Context, *MetricsRequest) (*MetricsResponse, error) - Wait(context.Context, *WaitRequest) (*WaitResponse, error) +func (x *UpdateTaskRequest) GetAnnotations() map[string]string { + if x != nil { + return x.Annotations + } + return nil } -// UnimplementedTasksServer can be embedded to have forward compatible implementations. 
-type UnimplementedTasksServer struct { -} +type MetricsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (*UnimplementedTasksServer) Create(ctx context.Context, req *CreateTaskRequest) (*CreateTaskResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") -} -func (*UnimplementedTasksServer) Start(ctx context.Context, req *StartRequest) (*StartResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Start not implemented") + Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` } -func (*UnimplementedTasksServer) Delete(ctx context.Context, req *DeleteTaskRequest) (*DeleteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") -} -func (*UnimplementedTasksServer) DeleteProcess(ctx context.Context, req *DeleteProcessRequest) (*DeleteResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteProcess not implemented") -} -func (*UnimplementedTasksServer) Get(ctx context.Context, req *GetRequest) (*GetResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") -} -func (*UnimplementedTasksServer) List(ctx context.Context, req *ListTasksRequest) (*ListTasksResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method List not implemented") -} -func (*UnimplementedTasksServer) Kill(ctx context.Context, req *KillRequest) (*types1.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Kill not implemented") + +func (x *MetricsRequest) Reset() { + *x = MetricsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (*UnimplementedTasksServer) Exec(ctx context.Context, req *ExecProcessRequest) (*types1.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Exec not implemented") + +func (x *MetricsRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func (*UnimplementedTasksServer) ResizePty(ctx context.Context, req *ResizePtyRequest) (*types1.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method ResizePty not implemented") + +func (*MetricsRequest) ProtoMessage() {} + +func (x *MetricsRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (*UnimplementedTasksServer) CloseIO(ctx context.Context, req *CloseIORequest) (*types1.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method CloseIO not implemented") + +// Deprecated: Use MetricsRequest.ProtoReflect.Descriptor instead. 
+func (*MetricsRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{23} } -func (*UnimplementedTasksServer) Pause(ctx context.Context, req *PauseTaskRequest) (*types1.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Pause not implemented") + +func (x *MetricsRequest) GetFilters() []string { + if x != nil { + return x.Filters + } + return nil } -func (*UnimplementedTasksServer) Resume(ctx context.Context, req *ResumeTaskRequest) (*types1.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Resume not implemented") + +type MetricsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Metrics []*types.Metric `protobuf:"bytes,1,rep,name=metrics,proto3" json:"metrics,omitempty"` } -func (*UnimplementedTasksServer) ListPids(ctx context.Context, req *ListPidsRequest) (*ListPidsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListPids not implemented") + +func (x *MetricsResponse) Reset() { + *x = MetricsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (*UnimplementedTasksServer) Checkpoint(ctx context.Context, req *CheckpointTaskRequest) (*CheckpointTaskResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Checkpoint not implemented") + +func (x *MetricsResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func (*UnimplementedTasksServer) Update(ctx context.Context, req *UpdateTaskRequest) (*types1.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") + +func (*MetricsResponse) ProtoMessage() {} + +func (x *MetricsResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func (*UnimplementedTasksServer) Metrics(ctx context.Context, req *MetricsRequest) (*MetricsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Metrics not implemented") + +// Deprecated: Use MetricsResponse.ProtoReflect.Descriptor instead. 
+func (*MetricsResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{24} } -func (*UnimplementedTasksServer) Wait(ctx context.Context, req *WaitRequest) (*WaitResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Wait not implemented") + +func (x *MetricsResponse) GetMetrics() []*types.Metric { + if x != nil { + return x.Metrics + } + return nil } -func RegisterTasksServer(s *grpc.Server, srv TasksServer) { - s.RegisterService(&_Tasks_serviceDesc, srv) +type WaitRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` } -func _Tasks_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateTaskRequest) - if err := dec(in); err != nil { - return nil, err +func (x *WaitRequest) Reset() { + *x = WaitRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - if interceptor == nil { - return srv.(TasksServer).Create(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/Create", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).Create(ctx, req.(*CreateTaskRequest)) - } - return interceptor(ctx, in, info, handler) } -func _Tasks_Start_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StartRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).Start(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/Start", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).Start(ctx, req.(*StartRequest)) - } - return interceptor(ctx, in, info, handler) +func (x *WaitRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -func _Tasks_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteTaskRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).Delete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/Delete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).Delete(ctx, req.(*DeleteTaskRequest)) +func (*WaitRequest) ProtoMessage() {} + +func (x *WaitRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return interceptor(ctx, in, info, handler) + return mi.MessageOf(x) } -func 
_Tasks_DeleteProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteProcessRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).DeleteProcess(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/DeleteProcess", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).DeleteProcess(ctx, req.(*DeleteProcessRequest)) - } - return interceptor(ctx, in, info, handler) +// Deprecated: Use WaitRequest.ProtoReflect.Descriptor instead. +func (*WaitRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{25} } -func _Tasks_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).Get(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/Get", +func (x *WaitRequest) GetContainerID() string { + if x != nil { + return x.ContainerID } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).Get(ctx, req.(*GetRequest)) - } - return interceptor(ctx, in, info, handler) + return "" } -func _Tasks_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListTasksRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).List(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/List", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).List(ctx, req.(*ListTasksRequest)) +func (x *WaitRequest) GetExecID() string { + if x != nil { + return x.ExecID } - return interceptor(ctx, in, info, handler) + return "" } -func _Tasks_Kill_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(KillRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).Kill(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/Kill", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).Kill(ctx, req.(*KillRequest)) - } - return interceptor(ctx, in, info, handler) +type WaitResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ExitStatus uint32 `protobuf:"varint,1,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=exited_at,json=exitedAt,proto3" json:"exited_at,omitempty"` } -func _Tasks_Exec_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ExecProcessRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return 
srv.(TasksServer).Exec(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/Exec", +func (x *WaitResponse) Reset() { + *x = WaitResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).Exec(ctx, req.(*ExecProcessRequest)) - } - return interceptor(ctx, in, info, handler) } -func _Tasks_ResizePty_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ResizePtyRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).ResizePty(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/ResizePty", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).ResizePty(ctx, req.(*ResizePtyRequest)) - } - return interceptor(ctx, in, info, handler) +func (x *WaitResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -func _Tasks_CloseIO_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CloseIORequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).CloseIO(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/CloseIO", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).CloseIO(ctx, req.(*CloseIORequest)) +func (*WaitResponse) ProtoMessage() {} + +func (x *WaitResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return interceptor(ctx, in, info, handler) + return mi.MessageOf(x) } -func _Tasks_Pause_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PauseTaskRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).Pause(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/Pause", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).Pause(ctx, req.(*PauseTaskRequest)) - } - return interceptor(ctx, in, info, handler) +// Deprecated: Use WaitResponse.ProtoReflect.Descriptor instead. 
+func (*WaitResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP(), []int{26} } -func _Tasks_Resume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ResumeTaskRequest) - if err := dec(in); err != nil { - return nil, err +func (x *WaitResponse) GetExitStatus() uint32 { + if x != nil { + return x.ExitStatus } - if interceptor == nil { - return srv.(TasksServer).Resume(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/Resume", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).Resume(ctx, req.(*ResumeTaskRequest)) - } - return interceptor(ctx, in, info, handler) + return 0 } -func _Tasks_ListPids_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListPidsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).ListPids(ctx, in) +func (x *WaitResponse) GetExitedAt() *timestamppb.Timestamp { + if x != nil { + return x.ExitedAt } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/ListPids", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).ListPids(ctx, req.(*ListPidsRequest)) - } - return interceptor(ctx, in, info, handler) + return nil } -func _Tasks_Checkpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CheckpointTaskRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).Checkpoint(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/Checkpoint", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).Checkpoint(ctx, req.(*CheckpointTaskRequest)) - } - return interceptor(ctx, in, info, handler) +var File_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDesc = []byte{ + 0x0a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, + 0x76, 0x31, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x36, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 
0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x3b, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xda, 0x02, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, + 0x2f, 0x0a, 0x06, 0x72, 0x6f, 0x6f, 0x74, 0x66, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x06, 0x72, 0x6f, 0x6f, 0x74, 0x66, 0x73, + 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x64, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x73, 0x74, 0x64, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x12, 0x16, + 0x0a, 0x06, 0x73, 0x74, 0x64, 0x65, 0x72, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x73, 0x74, 0x64, 0x65, 0x72, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, + 0x61, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, + 0x61, 0x6c, 0x12, 0x3c, 0x0a, 0x0a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x6f, 0x72, 0x52, 0x0a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x12, 0x2e, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, + 0x18, 0x0a, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x50, + 0x61, 0x74, 0x68, 0x22, 0x49, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, + 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, + 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x22, 0x4a, + 0x0a, 0x0c, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, + 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, + 0x64, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, 0x64, 0x22, 0x21, 0x0a, 0x0d, 0x53, 0x74, + 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x70, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x22, 0x36, 0x0a, + 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x49, 0x64, 0x22, 0x8c, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, + 0x69, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0a, 0x65, 0x78, 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x09, 0x65, + 0x78, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78, 0x69, 0x74, + 0x65, 0x64, 0x41, 0x74, 0x22, 0x52, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x72, + 0x6f, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, + 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, 0x64, 0x22, 0x48, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, + 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, + 0x49, 0x64, 0x22, 0x45, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x36, 0x0a, 0x07, 0x70, 0x72, 0x6f, 
0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x76, 0x31, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, + 0x52, 0x07, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x22, 0x2a, 0x0a, 0x10, 0x4c, 0x69, 0x73, + 0x74, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, + 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x47, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x73, + 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x05, 0x74, 0x61, + 0x73, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, + 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x52, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x22, 0x73, + 0x0a, 0x0b, 0x4b, 0x69, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, + 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, + 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x69, 0x67, + 0x6e, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x73, 0x69, 0x67, 0x6e, 0x61, + 0x6c, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x6c, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, + 0x61, 0x6c, 0x6c, 0x22, 0xdc, 0x01, 0x0a, 0x12, 0x45, 0x78, 0x65, 0x63, 0x50, 0x72, 0x6f, 0x63, + 0x65, 0x73, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x14, 0x0a, + 0x05, 0x73, 0x74, 0x64, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, + 0x64, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, + 0x74, 0x64, 0x65, 0x72, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x64, + 0x65, 0x72, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x12, + 0x28, 0x0a, 0x04, 0x73, 0x70, 0x65, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x41, 0x6e, 0x79, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, + 0x63, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, + 0x49, 0x64, 0x22, 0x15, 0x0a, 0x13, 0x45, 0x78, 0x65, 0x63, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7c, 0x0a, 0x10, 0x52, 0x65, 0x73, + 0x69, 0x7a, 0x65, 0x50, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, + 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 
0x65, 0x72, 0x49, 0x64, + 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x77, 0x69, 0x64, + 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x77, 0x69, 0x64, 0x74, 0x68, 0x12, + 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x62, 0x0a, 0x0e, 0x43, 0x6c, 0x6f, 0x73, 0x65, + 0x49, 0x4f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, + 0x65, 0x78, 0x65, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, + 0x78, 0x65, 0x63, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x64, 0x69, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x73, 0x74, 0x64, 0x69, 0x6e, 0x22, 0x35, 0x0a, 0x10, 0x50, + 0x61, 0x75, 0x73, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x49, 0x64, 0x22, 0x36, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x54, 0x61, 0x73, 0x6b, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x22, 0x34, 0x0a, 0x0f, 0x4c, 0x69, + 0x73, 0x74, 0x50, 0x69, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, + 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, + 0x22, 0x52, 0x0a, 0x10, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x69, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x72, + 0x6f, 0x63, 0x65, 0x73, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, + 0x73, 0x73, 0x65, 0x73, 0x22, 0x97, 0x01, 0x0a, 0x15, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, + 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, + 0x64, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, + 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x2e, + 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x58, + 
0x0a, 0x16, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x54, 0x61, 0x73, 0x6b, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x22, 0x8e, 0x02, 0x0a, 0x11, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, + 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, + 0x64, 0x12, 0x32, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x62, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x2a, 0x0a, 0x0e, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x73, 0x22, 0x45, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x22, 0x49, 0x0a, 0x0b, + 0x57, 0x61, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x17, + 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, 0x64, 0x22, 0x68, 0x0a, 0x0c, 0x57, 0x61, 0x69, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x69, 0x74, 0x5f, + 0x73, 0x74, 0x61, 0x74, 
0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x65, 0x78, + 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x09, 0x65, 0x78, 0x69, 0x74, + 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x41, + 0x74, 0x32, 0xdc, 0x0c, 0x0a, 0x05, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x12, 0x6b, 0x0a, 0x06, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, + 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x72, + 0x74, 0x12, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, + 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x67, 0x0a, 0x06, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x12, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x71, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x72, 0x6f, + 0x63, 0x65, 0x73, 0x73, 0x12, 0x32, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, + 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x28, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 
0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, + 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x67, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, + 0x73, 0x6b, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, + 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, + 0x73, 0x6b, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x04, 0x4b, + 0x69, 0x6c, 0x6c, 0x12, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x4b, 0x69, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x50, 0x0a, 0x04, 0x45, 0x78, 0x65, 0x63, 0x12, 0x30, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, + 0x65, 0x63, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x53, 0x0a, 0x09, 0x52, 0x65, 0x73, 0x69, + 0x7a, 0x65, 0x50, 0x74, 0x79, 0x12, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x69, 0x7a, 0x65, 0x50, 0x74, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4f, 0x0a, + 0x07, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x49, 0x4f, 0x12, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, + 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x49, 0x4f, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4f, + 0x0a, 0x05, 0x50, 0x61, 0x75, 0x73, 0x65, 0x12, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, + 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x61, 0x75, 0x73, 0x65, 0x54, 0x61, 0x73, 0x6b, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, + 0x51, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x12, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, + 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 
0x75, 0x6d, 0x65, 0x54, + 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x12, 0x69, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x69, 0x64, 0x73, 0x12, 0x2d, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x50, 0x69, 0x64, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, + 0x74, 0x50, 0x69, 0x64, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x77, 0x0a, + 0x0a, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x33, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x34, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x43, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x12, 0x2f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x66, 0x0a, 0x07, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x12, 0x2c, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x5d, 0x0a, 0x04, 0x57, 0x61, 0x69, 0x74, 0x12, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, + 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x61, 0x69, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x61, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x42, 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 
0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2f, 0x76, 0x31, 0x3b, 0x74, 0x61, 0x73, 0x6b, 0x73, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func _Tasks_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateTaskRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).Update(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/Update", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).Update(ctx, req.(*UpdateTaskRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Tasks_Metrics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MetricsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).Metrics(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/Metrics", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).Metrics(ctx, req.(*MetricsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Tasks_Wait_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(WaitRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(TasksServer).Wait(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.tasks.v1.Tasks/Wait", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(TasksServer).Wait(ctx, req.(*WaitRequest)) - } - return interceptor(ctx, in, info, handler) -} +var ( + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescData = file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDesc +) -var _Tasks_serviceDesc = grpc.ServiceDesc{ - ServiceName: "containerd.services.tasks.v1.Tasks", - HandlerType: (*TasksServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Create", - Handler: _Tasks_Create_Handler, - }, - { - MethodName: "Start", - Handler: _Tasks_Start_Handler, - }, - { - MethodName: "Delete", - Handler: _Tasks_Delete_Handler, - }, - { - MethodName: "DeleteProcess", - Handler: _Tasks_DeleteProcess_Handler, - }, - { - MethodName: "Get", - Handler: _Tasks_Get_Handler, - }, - { - MethodName: "List", - Handler: _Tasks_List_Handler, - }, - { - MethodName: "Kill", - Handler: _Tasks_Kill_Handler, - }, - { - MethodName: "Exec", - Handler: _Tasks_Exec_Handler, - }, - { - MethodName: "ResizePty", - Handler: _Tasks_ResizePty_Handler, - }, - { - MethodName: "CloseIO", - Handler: _Tasks_CloseIO_Handler, - }, - { - MethodName: "Pause", - Handler: _Tasks_Pause_Handler, - }, - { - MethodName: "Resume", - Handler: _Tasks_Resume_Handler, - }, - { - MethodName: "ListPids", - Handler: _Tasks_ListPids_Handler, - }, - { - MethodName: "Checkpoint", - Handler: _Tasks_Checkpoint_Handler, - }, - { - MethodName: "Update", - Handler: 
_Tasks_Update_Handler, +func file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes = make([]protoimpl.MessageInfo, 28) +var file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_goTypes = []interface{}{ + (*CreateTaskRequest)(nil), // 0: containerd.services.tasks.v1.CreateTaskRequest + (*CreateTaskResponse)(nil), // 1: containerd.services.tasks.v1.CreateTaskResponse + (*StartRequest)(nil), // 2: containerd.services.tasks.v1.StartRequest + (*StartResponse)(nil), // 3: containerd.services.tasks.v1.StartResponse + (*DeleteTaskRequest)(nil), // 4: containerd.services.tasks.v1.DeleteTaskRequest + (*DeleteResponse)(nil), // 5: containerd.services.tasks.v1.DeleteResponse + (*DeleteProcessRequest)(nil), // 6: containerd.services.tasks.v1.DeleteProcessRequest + (*GetRequest)(nil), // 7: containerd.services.tasks.v1.GetRequest + (*GetResponse)(nil), // 8: containerd.services.tasks.v1.GetResponse + (*ListTasksRequest)(nil), // 9: containerd.services.tasks.v1.ListTasksRequest + (*ListTasksResponse)(nil), // 10: containerd.services.tasks.v1.ListTasksResponse + (*KillRequest)(nil), // 11: containerd.services.tasks.v1.KillRequest + (*ExecProcessRequest)(nil), // 12: containerd.services.tasks.v1.ExecProcessRequest + (*ExecProcessResponse)(nil), // 13: containerd.services.tasks.v1.ExecProcessResponse + (*ResizePtyRequest)(nil), // 14: containerd.services.tasks.v1.ResizePtyRequest + (*CloseIORequest)(nil), // 15: containerd.services.tasks.v1.CloseIORequest + (*PauseTaskRequest)(nil), // 16: containerd.services.tasks.v1.PauseTaskRequest + (*ResumeTaskRequest)(nil), // 17: containerd.services.tasks.v1.ResumeTaskRequest + (*ListPidsRequest)(nil), // 18: containerd.services.tasks.v1.ListPidsRequest + (*ListPidsResponse)(nil), // 19: containerd.services.tasks.v1.ListPidsResponse + (*CheckpointTaskRequest)(nil), // 20: containerd.services.tasks.v1.CheckpointTaskRequest + (*CheckpointTaskResponse)(nil), // 21: containerd.services.tasks.v1.CheckpointTaskResponse + (*UpdateTaskRequest)(nil), // 22: containerd.services.tasks.v1.UpdateTaskRequest + (*MetricsRequest)(nil), // 23: containerd.services.tasks.v1.MetricsRequest + (*MetricsResponse)(nil), // 24: containerd.services.tasks.v1.MetricsResponse + (*WaitRequest)(nil), // 25: containerd.services.tasks.v1.WaitRequest + (*WaitResponse)(nil), // 26: containerd.services.tasks.v1.WaitResponse + nil, // 27: containerd.services.tasks.v1.UpdateTaskRequest.AnnotationsEntry + (*types.Mount)(nil), // 28: containerd.types.Mount + (*types.Descriptor)(nil), // 29: containerd.types.Descriptor + (*anypb.Any)(nil), // 30: google.protobuf.Any + (*timestamppb.Timestamp)(nil), // 31: google.protobuf.Timestamp + (*task.Process)(nil), // 32: containerd.v1.types.Process + (*task.ProcessInfo)(nil), // 33: containerd.v1.types.ProcessInfo + (*types.Metric)(nil), // 34: containerd.types.Metric + (*emptypb.Empty)(nil), // 35: google.protobuf.Empty +} +var file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_depIdxs = []int32{ + 28, // 0: 
containerd.services.tasks.v1.CreateTaskRequest.rootfs:type_name -> containerd.types.Mount + 29, // 1: containerd.services.tasks.v1.CreateTaskRequest.checkpoint:type_name -> containerd.types.Descriptor + 30, // 2: containerd.services.tasks.v1.CreateTaskRequest.options:type_name -> google.protobuf.Any + 31, // 3: containerd.services.tasks.v1.DeleteResponse.exited_at:type_name -> google.protobuf.Timestamp + 32, // 4: containerd.services.tasks.v1.GetResponse.process:type_name -> containerd.v1.types.Process + 32, // 5: containerd.services.tasks.v1.ListTasksResponse.tasks:type_name -> containerd.v1.types.Process + 30, // 6: containerd.services.tasks.v1.ExecProcessRequest.spec:type_name -> google.protobuf.Any + 33, // 7: containerd.services.tasks.v1.ListPidsResponse.processes:type_name -> containerd.v1.types.ProcessInfo + 30, // 8: containerd.services.tasks.v1.CheckpointTaskRequest.options:type_name -> google.protobuf.Any + 29, // 9: containerd.services.tasks.v1.CheckpointTaskResponse.descriptors:type_name -> containerd.types.Descriptor + 30, // 10: containerd.services.tasks.v1.UpdateTaskRequest.resources:type_name -> google.protobuf.Any + 27, // 11: containerd.services.tasks.v1.UpdateTaskRequest.annotations:type_name -> containerd.services.tasks.v1.UpdateTaskRequest.AnnotationsEntry + 34, // 12: containerd.services.tasks.v1.MetricsResponse.metrics:type_name -> containerd.types.Metric + 31, // 13: containerd.services.tasks.v1.WaitResponse.exited_at:type_name -> google.protobuf.Timestamp + 0, // 14: containerd.services.tasks.v1.Tasks.Create:input_type -> containerd.services.tasks.v1.CreateTaskRequest + 2, // 15: containerd.services.tasks.v1.Tasks.Start:input_type -> containerd.services.tasks.v1.StartRequest + 4, // 16: containerd.services.tasks.v1.Tasks.Delete:input_type -> containerd.services.tasks.v1.DeleteTaskRequest + 6, // 17: containerd.services.tasks.v1.Tasks.DeleteProcess:input_type -> containerd.services.tasks.v1.DeleteProcessRequest + 7, // 18: containerd.services.tasks.v1.Tasks.Get:input_type -> containerd.services.tasks.v1.GetRequest + 9, // 19: containerd.services.tasks.v1.Tasks.List:input_type -> containerd.services.tasks.v1.ListTasksRequest + 11, // 20: containerd.services.tasks.v1.Tasks.Kill:input_type -> containerd.services.tasks.v1.KillRequest + 12, // 21: containerd.services.tasks.v1.Tasks.Exec:input_type -> containerd.services.tasks.v1.ExecProcessRequest + 14, // 22: containerd.services.tasks.v1.Tasks.ResizePty:input_type -> containerd.services.tasks.v1.ResizePtyRequest + 15, // 23: containerd.services.tasks.v1.Tasks.CloseIO:input_type -> containerd.services.tasks.v1.CloseIORequest + 16, // 24: containerd.services.tasks.v1.Tasks.Pause:input_type -> containerd.services.tasks.v1.PauseTaskRequest + 17, // 25: containerd.services.tasks.v1.Tasks.Resume:input_type -> containerd.services.tasks.v1.ResumeTaskRequest + 18, // 26: containerd.services.tasks.v1.Tasks.ListPids:input_type -> containerd.services.tasks.v1.ListPidsRequest + 20, // 27: containerd.services.tasks.v1.Tasks.Checkpoint:input_type -> containerd.services.tasks.v1.CheckpointTaskRequest + 22, // 28: containerd.services.tasks.v1.Tasks.Update:input_type -> containerd.services.tasks.v1.UpdateTaskRequest + 23, // 29: containerd.services.tasks.v1.Tasks.Metrics:input_type -> containerd.services.tasks.v1.MetricsRequest + 25, // 30: containerd.services.tasks.v1.Tasks.Wait:input_type -> containerd.services.tasks.v1.WaitRequest + 1, // 31: containerd.services.tasks.v1.Tasks.Create:output_type -> 
containerd.services.tasks.v1.CreateTaskResponse + 3, // 32: containerd.services.tasks.v1.Tasks.Start:output_type -> containerd.services.tasks.v1.StartResponse + 5, // 33: containerd.services.tasks.v1.Tasks.Delete:output_type -> containerd.services.tasks.v1.DeleteResponse + 5, // 34: containerd.services.tasks.v1.Tasks.DeleteProcess:output_type -> containerd.services.tasks.v1.DeleteResponse + 8, // 35: containerd.services.tasks.v1.Tasks.Get:output_type -> containerd.services.tasks.v1.GetResponse + 10, // 36: containerd.services.tasks.v1.Tasks.List:output_type -> containerd.services.tasks.v1.ListTasksResponse + 35, // 37: containerd.services.tasks.v1.Tasks.Kill:output_type -> google.protobuf.Empty + 35, // 38: containerd.services.tasks.v1.Tasks.Exec:output_type -> google.protobuf.Empty + 35, // 39: containerd.services.tasks.v1.Tasks.ResizePty:output_type -> google.protobuf.Empty + 35, // 40: containerd.services.tasks.v1.Tasks.CloseIO:output_type -> google.protobuf.Empty + 35, // 41: containerd.services.tasks.v1.Tasks.Pause:output_type -> google.protobuf.Empty + 35, // 42: containerd.services.tasks.v1.Tasks.Resume:output_type -> google.protobuf.Empty + 19, // 43: containerd.services.tasks.v1.Tasks.ListPids:output_type -> containerd.services.tasks.v1.ListPidsResponse + 21, // 44: containerd.services.tasks.v1.Tasks.Checkpoint:output_type -> containerd.services.tasks.v1.CheckpointTaskResponse + 35, // 45: containerd.services.tasks.v1.Tasks.Update:output_type -> google.protobuf.Empty + 24, // 46: containerd.services.tasks.v1.Tasks.Metrics:output_type -> containerd.services.tasks.v1.MetricsResponse + 26, // 47: containerd.services.tasks.v1.Tasks.Wait:output_type -> containerd.services.tasks.v1.WaitResponse + 31, // [31:48] is the sub-list for method output_type + 14, // [14:31] is the sub-list for method input_type + 14, // [14:14] is the sub-list for extension type_name + 14, // [14:14] is the sub-list for extension extendee + 0, // [0:14] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_init() } +func file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_init() { + if File_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateTaskRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateTaskResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StartRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StartResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteTaskRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteProcessRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListTasksRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListTasksResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KillRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecProcessRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExecProcessResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResizePtyRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CloseIORequest); i { + 
case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PauseTaskRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResumeTaskRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListPidsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListPidsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckpointTaskRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckpointTaskResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateTaskRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetricsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetricsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WaitRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WaitResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := 
protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDesc, + NumEnums: 0, + NumMessages: 28, + NumExtensions: 0, + NumServices: 1, }, - { - MethodName: "Metrics", - Handler: _Tasks_Metrics_Handler, - }, - { - MethodName: "Wait", - Handler: _Tasks_Wait_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "github.com/containerd/containerd/api/services/tasks/v1/tasks.proto", -} - -func (m *CreateTaskRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CreateTaskRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CreateTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.RuntimePath) > 0 { - i -= len(m.RuntimePath) - copy(dAtA[i:], m.RuntimePath) - i = encodeVarintTasks(dAtA, i, uint64(len(m.RuntimePath))) - i-- - dAtA[i] = 0x52 - } - if m.Options != nil { - { - size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTasks(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - if m.Checkpoint != nil { - { - size, err := m.Checkpoint.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTasks(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - if m.Terminal { - i-- - if m.Terminal { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x38 - } - if len(m.Stderr) > 0 { - i -= len(m.Stderr) - copy(dAtA[i:], m.Stderr) - i = encodeVarintTasks(dAtA, i, uint64(len(m.Stderr))) - i-- - dAtA[i] = 0x32 - } - if len(m.Stdout) > 0 { - i -= len(m.Stdout) - copy(dAtA[i:], m.Stdout) - i = encodeVarintTasks(dAtA, i, uint64(len(m.Stdout))) - i-- - dAtA[i] = 0x2a - } - if len(m.Stdin) > 0 { - i -= len(m.Stdin) - copy(dAtA[i:], m.Stdin) - i = encodeVarintTasks(dAtA, i, uint64(len(m.Stdin))) - i-- - dAtA[i] = 0x22 - } - if len(m.Rootfs) > 0 { - for iNdEx := len(m.Rootfs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Rootfs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTasks(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CreateTaskResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CreateTaskResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CreateTaskResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Pid != 0 { - i = encodeVarintTasks(dAtA, i, uint64(m.Pid)) - i-- - dAtA[i] = 0x10 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], 
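With the APIv2 regeneration, the hand-written gRPC glue (the _Tasks_*_Handler functions and _Tasks_serviceDesc above) no longer lives in tasks.pb.go; protoc-gen-go-grpc normally emits the equivalent client/server stubs into a separate tasks_grpc.pb.go. As a rough sketch only, and assuming the regenerated package still exposes TasksServer and RegisterTasksServer (neither is shown in this hunk), server wiring now goes through those stubs:

	package tasksexample

	import (
		tasks "github.com/containerd/containerd/api/services/tasks/v1"
		"google.golang.org/grpc"
	)

	// registerTasks hooks a TasksServer implementation into a gRPC server via the
	// generated service descriptor, instead of the _Tasks_serviceDesc removed above.
	func registerTasks(s *grpc.Server, impl tasks.TasksServer) {
		tasks.RegisterTasksServer(s, impl)
	}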
m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *StartRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StartRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StartRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *StartResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StartResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StartResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Pid != 0 { - i = encodeVarintTasks(dAtA, i, uint64(m.Pid)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *DeleteTaskRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteTaskRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *DeleteResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt):]) - if err3 != nil { - return 0, err3 - } - i -= n3 - i = encodeVarintTasks(dAtA, i, uint64(n3)) - i-- - dAtA[i] = 0x22 - if m.ExitStatus != 0 { - i = encodeVarintTasks(dAtA, i, uint64(m.ExitStatus)) - i-- - dAtA[i] = 0x18 - } - if m.Pid != 0 { - 
i = encodeVarintTasks(dAtA, i, uint64(m.Pid)) - i-- - dAtA[i] = 0x10 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *DeleteProcessRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteProcessRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteProcessRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + GoTypes: file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto = out.File + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_rawDesc = nil + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_goTypes = nil + file_github_com_containerd_containerd_api_services_tasks_v1_tasks_proto_depIdxs = nil } - -func (m *GetResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Process != nil { - { - size, err := m.Process.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTasks(dAtA, i, uint64(size)) 
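The protoimpl.TypeBuilder call that closes out the new init() above registers the raw descriptor, Go types, and dependency indexes with the global protobuf registry; serialization then goes through google.golang.org/protobuf/proto rather than the per-message Marshal/MarshalToSizedBuffer/Size/String methods removed throughout this hunk. A minimal illustrative sketch (not part of the vendored file) of what that registration gives callers, using the proto path recorded in this file:

	package main

	import (
		"fmt"

		tasks "github.com/containerd/containerd/api/services/tasks/v1"
		"google.golang.org/protobuf/proto"
		"google.golang.org/protobuf/reflect/protoregistry"
	)

	func main() {
		// proto.Marshal/proto.Unmarshal walk the message through its registered
		// descriptor; no generated Marshal()/Size() methods are needed anymore.
		req := &tasks.CreateTaskRequest{}
		b, err := proto.Marshal(req)
		if err != nil {
			panic(err)
		}
		if err := proto.Unmarshal(b, &tasks.CreateTaskRequest{}); err != nil {
			panic(err)
		}

		// The init() registration also makes the file discoverable by its path.
		fd, err := protoregistry.GlobalFiles.FindFileByPath(
			"github.com/containerd/containerd/api/services/tasks/v1/tasks.proto")
		if err != nil {
			panic(err)
		}
		fmt.Println(fd.Path(), len(b))
	}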
- } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ListTasksRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListTasksRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListTasksRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Filter) > 0 { - i -= len(m.Filter) - copy(dAtA[i:], m.Filter) - i = encodeVarintTasks(dAtA, i, uint64(len(m.Filter))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ListTasksResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListTasksResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListTasksResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Tasks) > 0 { - for iNdEx := len(m.Tasks) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Tasks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTasks(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *KillRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *KillRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *KillRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.All { - i-- - if m.All { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if m.Signal != 0 { - i = encodeVarintTasks(dAtA, i, uint64(m.Signal)) - i-- - dAtA[i] = 0x18 - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ExecProcessRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExecProcessRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExecProcessRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i 
= encodeVarintTasks(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x3a - } - if m.Spec != nil { - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTasks(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if m.Terminal { - i-- - if m.Terminal { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if len(m.Stderr) > 0 { - i -= len(m.Stderr) - copy(dAtA[i:], m.Stderr) - i = encodeVarintTasks(dAtA, i, uint64(len(m.Stderr))) - i-- - dAtA[i] = 0x22 - } - if len(m.Stdout) > 0 { - i -= len(m.Stdout) - copy(dAtA[i:], m.Stdout) - i = encodeVarintTasks(dAtA, i, uint64(len(m.Stdout))) - i-- - dAtA[i] = 0x1a - } - if len(m.Stdin) > 0 { - i -= len(m.Stdin) - copy(dAtA[i:], m.Stdin) - i = encodeVarintTasks(dAtA, i, uint64(len(m.Stdin))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ExecProcessResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExecProcessResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExecProcessResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *ResizePtyRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResizePtyRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResizePtyRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Height != 0 { - i = encodeVarintTasks(dAtA, i, uint64(m.Height)) - i-- - dAtA[i] = 0x20 - } - if m.Width != 0 { - i = encodeVarintTasks(dAtA, i, uint64(m.Width)) - i-- - dAtA[i] = 0x18 - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CloseIORequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CloseIORequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CloseIORequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Stdin { - i-- - if m.Stdin { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if 
len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PauseTaskRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PauseTaskRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PauseTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ResumeTaskRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResumeTaskRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResumeTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ListPidsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListPidsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListPidsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ListPidsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListPidsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListPidsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Processes) > 0 { - for iNdEx := len(m.Processes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Processes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintTasks(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *CheckpointTaskRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CheckpointTaskRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CheckpointTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Options != nil { - { - size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTasks(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if len(m.ParentCheckpoint) > 0 { - i -= len(m.ParentCheckpoint) - copy(dAtA[i:], m.ParentCheckpoint) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ParentCheckpoint))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CheckpointTaskResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CheckpointTaskResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CheckpointTaskResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Descriptors) > 0 { - for iNdEx := len(m.Descriptors) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Descriptors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTasks(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *UpdateTaskRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UpdateTaskRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UpdateTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Annotations) > 0 { - for k := range m.Annotations { - v := m.Annotations[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintTasks(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintTasks(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintTasks(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1a - } - } - if m.Resources != nil { - { - size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTasks(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - 
i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MetricsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Filters) > 0 { - for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Filters[iNdEx]) - copy(dAtA[i:], m.Filters[iNdEx]) - i = encodeVarintTasks(dAtA, i, uint64(len(m.Filters[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *MetricsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Metrics) > 0 { - for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Metrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTasks(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *WaitRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WaitRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WaitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *WaitResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WaitResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WaitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt):]) - if err8 != nil { 
- return 0, err8 - } - i -= n8 - i = encodeVarintTasks(dAtA, i, uint64(n8)) - i-- - dAtA[i] = 0x12 - if m.ExitStatus != 0 { - i = encodeVarintTasks(dAtA, i, uint64(m.ExitStatus)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintTasks(dAtA []byte, offset int, v uint64) int { - offset -= sovTasks(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *CreateTaskRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if len(m.Rootfs) > 0 { - for _, e := range m.Rootfs { - l = e.Size() - n += 1 + l + sovTasks(uint64(l)) - } - } - l = len(m.Stdin) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.Stdout) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.Stderr) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.Terminal { - n += 2 - } - if m.Checkpoint != nil { - l = m.Checkpoint.Size() - n += 1 + l + sovTasks(uint64(l)) - } - if m.Options != nil { - l = m.Options.Size() - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.RuntimePath) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CreateTaskResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.Pid != 0 { - n += 1 + sovTasks(uint64(m.Pid)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StartRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StartResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Pid != 0 { - n += 1 + sovTasks(uint64(m.Pid)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DeleteTaskRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DeleteResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.Pid != 0 { - n += 1 + sovTasks(uint64(m.Pid)) - } - if m.ExitStatus != 0 { - n += 1 + sovTasks(uint64(m.ExitStatus)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt) - n += 1 + l + sovTasks(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DeleteProcessRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *GetRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.XXX_unrecognized != nil { - n 
+= len(m.XXX_unrecognized) - } - return n -} - -func (m *GetResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Process != nil { - l = m.Process.Size() - n += 1 + l + sovTasks(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListTasksRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Filter) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListTasksResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Tasks) > 0 { - for _, e := range m.Tasks { - l = e.Size() - n += 1 + l + sovTasks(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *KillRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.Signal != 0 { - n += 1 + sovTasks(uint64(m.Signal)) - } - if m.All { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ExecProcessRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.Stdin) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.Stdout) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.Stderr) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.Terminal { - n += 2 - } - if m.Spec != nil { - l = m.Spec.Size() - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ExecProcessResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ResizePtyRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.Width != 0 { - n += 1 + sovTasks(uint64(m.Width)) - } - if m.Height != 0 { - n += 1 + sovTasks(uint64(m.Height)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CloseIORequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.Stdin { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PauseTaskRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ResumeTaskRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListPidsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if 
l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListPidsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Processes) > 0 { - for _, e := range m.Processes { - l = e.Size() - n += 1 + l + sovTasks(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CheckpointTaskRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.ParentCheckpoint) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.Options != nil { - l = m.Options.Size() - n += 1 + l + sovTasks(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CheckpointTaskResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Descriptors) > 0 { - for _, e := range m.Descriptors { - l = e.Size() - n += 1 + l + sovTasks(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UpdateTaskRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.Resources != nil { - l = m.Resources.Size() - n += 1 + l + sovTasks(uint64(l)) - } - if len(m.Annotations) > 0 { - for k, v := range m.Annotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovTasks(uint64(len(k))) + 1 + len(v) + sovTasks(uint64(len(v))) - n += mapEntrySize + 1 + sovTasks(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MetricsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Filters) > 0 { - for _, s := range m.Filters { - l = len(s) - n += 1 + l + sovTasks(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MetricsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Metrics) > 0 { - for _, e := range m.Metrics { - l = e.Size() - n += 1 + l + sovTasks(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *WaitRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovTasks(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *WaitResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ExitStatus != 0 { - n += 1 + sovTasks(uint64(m.ExitStatus)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt) - n += 1 + l + sovTasks(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovTasks(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozTasks(x uint64) (n int) { - return sovTasks(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *CreateTaskRequest) String() string { - if this == nil { - return "nil" - } - repeatedStringForRootfs := "[]*Mount{" - for _, f := range this.Rootfs { - repeatedStringForRootfs += strings.Replace(fmt.Sprintf("%v", f), "Mount", "types.Mount", 1) + "," - } - repeatedStringForRootfs += "}" - s := strings.Join([]string{`&CreateTaskRequest{`, - 
`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `Rootfs:` + repeatedStringForRootfs + `,`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, - `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, - `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, - `Checkpoint:` + strings.Replace(fmt.Sprintf("%v", this.Checkpoint), "Descriptor", "types.Descriptor", 1) + `,`, - `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "types1.Any", 1) + `,`, - `RuntimePath:` + fmt.Sprintf("%v", this.RuntimePath) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CreateTaskResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CreateTaskResponse{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *StartRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StartRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *StartResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StartResponse{`, - `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *DeleteTaskRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeleteTaskRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *DeleteResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeleteResponse{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, - `ExitedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExitedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *DeleteProcessRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeleteProcessRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *GetRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GetRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *GetResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GetResponse{`, - `Process:` + strings.Replace(fmt.Sprintf("%v", this.Process), "Process", "task.Process", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this 
*ListTasksRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ListTasksRequest{`, - `Filter:` + fmt.Sprintf("%v", this.Filter) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListTasksResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForTasks := "[]*Process{" - for _, f := range this.Tasks { - repeatedStringForTasks += strings.Replace(fmt.Sprintf("%v", f), "Process", "task.Process", 1) + "," - } - repeatedStringForTasks += "}" - s := strings.Join([]string{`&ListTasksResponse{`, - `Tasks:` + repeatedStringForTasks + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *KillRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&KillRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `Signal:` + fmt.Sprintf("%v", this.Signal) + `,`, - `All:` + fmt.Sprintf("%v", this.All) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ExecProcessRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExecProcessRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, - `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, - `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, - `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "Any", "types1.Any", 1) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ExecProcessResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExecProcessResponse{`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ResizePtyRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResizePtyRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `Width:` + fmt.Sprintf("%v", this.Width) + `,`, - `Height:` + fmt.Sprintf("%v", this.Height) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CloseIORequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CloseIORequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *PauseTaskRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PauseTaskRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ResumeTaskRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResumeTaskRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, 
- `}`, - }, "") - return s -} -func (this *ListPidsRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ListPidsRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListPidsResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForProcesses := "[]*ProcessInfo{" - for _, f := range this.Processes { - repeatedStringForProcesses += strings.Replace(fmt.Sprintf("%v", f), "ProcessInfo", "task.ProcessInfo", 1) + "," - } - repeatedStringForProcesses += "}" - s := strings.Join([]string{`&ListPidsResponse{`, - `Processes:` + repeatedStringForProcesses + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CheckpointTaskRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CheckpointTaskRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `ParentCheckpoint:` + fmt.Sprintf("%v", this.ParentCheckpoint) + `,`, - `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "types1.Any", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CheckpointTaskResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForDescriptors := "[]*Descriptor{" - for _, f := range this.Descriptors { - repeatedStringForDescriptors += strings.Replace(fmt.Sprintf("%v", f), "Descriptor", "types.Descriptor", 1) + "," - } - repeatedStringForDescriptors += "}" - s := strings.Join([]string{`&CheckpointTaskResponse{`, - `Descriptors:` + repeatedStringForDescriptors + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UpdateTaskRequest) String() string { - if this == nil { - return "nil" - } - keysForAnnotations := make([]string, 0, len(this.Annotations)) - for k, _ := range this.Annotations { - keysForAnnotations = append(keysForAnnotations, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) - mapStringForAnnotations := "map[string]string{" - for _, k := range keysForAnnotations { - mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) - } - mapStringForAnnotations += "}" - s := strings.Join([]string{`&UpdateTaskRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "Any", "types1.Any", 1) + `,`, - `Annotations:` + mapStringForAnnotations + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *MetricsRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MetricsRequest{`, - `Filters:` + fmt.Sprintf("%v", this.Filters) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *MetricsResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForMetrics := "[]*Metric{" - for _, f := range this.Metrics { - repeatedStringForMetrics += strings.Replace(fmt.Sprintf("%v", f), "Metric", "types.Metric", 1) + "," - } - repeatedStringForMetrics += "}" - s := strings.Join([]string{`&MetricsResponse{`, - `Metrics:` + repeatedStringForMetrics + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, 
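The String() methods being dropped hand-assemble a debug representation of each message, sorting map keys so the output is reproducible. With the google.golang.org/protobuf runtime that job is handled by the protobuf text format; a tiny sketch, again with wrapperspb as a placeholder message rather than one of the task-service types:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// prototext renders any message in the protobuf text format, the modern
	// counterpart of these hand-written String() helpers.
	fmt.Println(prototext.Format(wrapperspb.String("redis")))
}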
"") - return s -} -func (this *WaitRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WaitRequest{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *WaitResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WaitResponse{`, - `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, - `ExitedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExitedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringTasks(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CreateTaskRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CreateTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rootfs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rootfs = append(m.Rootfs, &types.Mount{}) - if err := m.Rootfs[len(m.Rootfs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } 
- postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdin = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdout = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stderr = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Terminal = bool(v != 0) - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Checkpoint", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Checkpoint == nil { - m.Checkpoint = &types.Descriptor{} - } - if err := m.Checkpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Options == nil { - m.Options = &types1.Any{} - } - if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimePath", wireType) - } - var stringLen uint64 - 
for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RuntimePath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CreateTaskResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CreateTaskResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CreateTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) - } - m.Pid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
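Fields the decoder does not recognize are appended to XXX_unrecognized so they survive a decode/encode round trip. The google.golang.org/protobuf runtime preserves unknown fields the same way through its reflection API; a sketch using emptypb.Empty as a message with no known fields, where field number 15 and its value are made up for the example:

package main

import (
	"bytes"
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	// Wire bytes carrying a field (number 15, varint value 7) that Empty does not define.
	raw := protowire.AppendVarint(protowire.AppendTag(nil, 15, protowire.VarintType), 7)

	msg := &emptypb.Empty{}
	if err := proto.Unmarshal(raw, msg); err != nil {
		panic(err)
	}
	// The undefined field is retained as unknown bytes (the role played by
	// XXX_unrecognized in the gogo-generated code) and survives a re-marshal.
	fmt.Printf("unknown bytes: %x\n", msg.ProtoReflect().GetUnknown())

	out, err := proto.Marshal(msg)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(out, raw)) // true: the unknown field round-trips
}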
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StartRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StartRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StartRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StartResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StartResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StartResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) - } - m.Pid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteTaskRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteTaskRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) - } - m.Pid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) - } - m.ExitStatus = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExitStatus |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
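DeleteResponse's ExitedAt is decoded above through gogo's StdTime helpers, which map a protobuf Timestamp onto time.Time. The equivalent conversion with the well-known-types package looks like the following sketch; the timestamp value itself is arbitrary:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	exitedAt := time.Date(2023, 5, 1, 12, 0, 0, 0, time.UTC)

	// Wrap a time.Time in the well-known Timestamp message and convert back,
	// the counterpart of the StdTimeUnmarshal call used for ExitedAt above.
	ts := timestamppb.New(exitedAt)
	if err := ts.CheckValid(); err != nil {
		panic(err)
	}
	fmt.Println(ts.AsTime().Equal(exitedAt)) // true
}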
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteProcessRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteProcessRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteProcessRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Process", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Process == nil { - m.Process = &task.Process{} - } - if err := m.Process.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListTasksRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListTasksRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListTasksRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListTasksResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListTasksResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListTasksResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tasks", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Tasks = append(m.Tasks, &task.Process{}) - if err := m.Tasks[len(m.Tasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *KillRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: KillRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: KillRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType) - } - m.Signal = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Signal |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field All", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.All = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
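The varint loops in these Unmarshal methods also encode their two failure modes: a truncated buffer (io.ErrUnexpectedEOF) and a varint longer than ten bytes (ErrIntOverflowTasks). A sketch of the same checks via protowire.ConsumeVarint; readUvarint is a hypothetical helper name used only for this example:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// readUvarint mirrors the hand-rolled "for shift := ..." loops in the generated
// Unmarshal methods, including their truncation and overflow failure modes.
func readUvarint(b []byte) (uint64, int, error) {
	v, n := protowire.ConsumeVarint(b)
	if n < 0 {
		return 0, 0, protowire.ParseError(n) // truncated input or an over-long varint
	}
	return v, n, nil
}

func main() {
	ok := []byte{0xAC, 0x02}  // 300 encoded as a two-byte varint
	truncated := []byte{0x80} // continuation bit set, but no following byte
	for _, in := range [][]byte{ok, truncated} {
		v, n, err := readUvarint(in)
		fmt.Println(v, n, err)
	}
}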
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExecProcessRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExecProcessRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExecProcessRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdin = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdout = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stderr = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Terminal = bool(v != 0) - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Spec == nil { - m.Spec = &types1.Any{} - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExecProcessResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExecProcessResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExecProcessResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResizePtyRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResizePtyRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResizePtyRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Width", wireType) - } - m.Width = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Width |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) - } - m.Height = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Height |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CloseIORequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CloseIORequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CloseIORequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Stdin = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PauseTaskRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PauseTaskRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PauseTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResumeTaskRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResumeTaskRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResumeTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListPidsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListPidsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListPidsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListPidsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListPidsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListPidsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Processes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Processes = append(m.Processes, &task.ProcessInfo{}) - if err := m.Processes[len(m.Processes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CheckpointTaskRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CheckpointTaskRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CheckpointTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ParentCheckpoint", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ParentCheckpoint = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Options == nil { - m.Options = &types1.Any{} - } - if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CheckpointTaskResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CheckpointTaskResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CheckpointTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Descriptors", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Descriptors = append(m.Descriptors, &types.Descriptor{}) - if err := m.Descriptors[len(m.Descriptors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UpdateTaskRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UpdateTaskRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Resources == nil { - m.Resources = &types1.Any{} - } - if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Annotations == nil { - m.Annotations = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := 
int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthTasks - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthTasks - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthTasks - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthTasks - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Annotations[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Metrics = append(m.Metrics, &types.Metric{}) - if err := m.Metrics[len(m.Metrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WaitRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WaitRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WaitRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WaitResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WaitResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WaitResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) - } - m.ExitStatus = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExitStatus |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTasks - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTasks - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTasks - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTasks(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTasks - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipTasks(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTasks - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTasks - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTasks - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthTasks - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupTasks - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthTasks - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthTasks = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTasks = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupTasks = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto index 6299c760265fb..8ddd319260299 100644 --- a/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto +++ b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto @@ -20,7 +20,6 @@ package containerd.services.tasks.v1; import "google/protobuf/empty.proto"; import "google/protobuf/any.proto"; -import weak "gogoproto/gogo.proto"; import "github.com/containerd/containerd/api/types/mount.proto"; import "github.com/containerd/containerd/api/types/metrics.proto"; import "github.com/containerd/containerd/api/types/descriptor.proto"; @@ -114,7 +113,7 @@ message DeleteResponse { string id = 1; uint32 pid = 2; uint32 exit_status = 3; - google.protobuf.Timestamp exited_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp exited_at = 4; } message DeleteProcessRequest { @@ -195,7 +194,7 @@ message ListPidsResponse { message CheckpointTaskRequest { string container_id = 1; - string parent_checkpoint = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + string parent_checkpoint = 2; google.protobuf.Any options = 3; } @@ -224,5 +223,5 @@ message WaitRequest { message WaitResponse { uint32 exit_status = 1; - google.protobuf.Timestamp exited_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp exited_at = 2; } diff --git a/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks_grpc.pb.go new file mode 100644 index 0000000000000..3fd4057e9b9b8 --- /dev/null +++ 
b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks_grpc.pb.go @@ -0,0 +1,690 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/tasks/v1/tasks.proto + +package tasks + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// TasksClient is the client API for Tasks service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type TasksClient interface { + // Create a task. + Create(ctx context.Context, in *CreateTaskRequest, opts ...grpc.CallOption) (*CreateTaskResponse, error) + // Start a process. + Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) + // Delete a task and on disk state. + Delete(ctx context.Context, in *DeleteTaskRequest, opts ...grpc.CallOption) (*DeleteResponse, error) + DeleteProcess(ctx context.Context, in *DeleteProcessRequest, opts ...grpc.CallOption) (*DeleteResponse, error) + Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) + List(ctx context.Context, in *ListTasksRequest, opts ...grpc.CallOption) (*ListTasksResponse, error) + // Kill a task or process. + Kill(ctx context.Context, in *KillRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + Exec(ctx context.Context, in *ExecProcessRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + ResizePty(ctx context.Context, in *ResizePtyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + CloseIO(ctx context.Context, in *CloseIORequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + Pause(ctx context.Context, in *PauseTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + Resume(ctx context.Context, in *ResumeTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + ListPids(ctx context.Context, in *ListPidsRequest, opts ...grpc.CallOption) (*ListPidsResponse, error) + Checkpoint(ctx context.Context, in *CheckpointTaskRequest, opts ...grpc.CallOption) (*CheckpointTaskResponse, error) + Update(ctx context.Context, in *UpdateTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + Metrics(ctx context.Context, in *MetricsRequest, opts ...grpc.CallOption) (*MetricsResponse, error) + Wait(ctx context.Context, in *WaitRequest, opts ...grpc.CallOption) (*WaitResponse, error) +} + +type tasksClient struct { + cc grpc.ClientConnInterface +} + +func NewTasksClient(cc grpc.ClientConnInterface) TasksClient { + return &tasksClient{cc} +} + +func (c *tasksClient) Create(ctx context.Context, in *CreateTaskRequest, opts ...grpc.CallOption) (*CreateTaskResponse, error) { + out := new(CreateTaskResponse) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Create", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) { + out := new(StartResponse) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Start", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Delete(ctx context.Context, in *DeleteTaskRequest, opts ...grpc.CallOption) (*DeleteResponse, error) { + out := new(DeleteResponse) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) DeleteProcess(ctx context.Context, in *DeleteProcessRequest, opts ...grpc.CallOption) (*DeleteResponse, error) { + out := new(DeleteResponse) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/DeleteProcess", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) { + out := new(GetResponse) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Get", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) List(ctx context.Context, in *ListTasksRequest, opts ...grpc.CallOption) (*ListTasksResponse, error) { + out := new(ListTasksResponse) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/List", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Kill(ctx context.Context, in *KillRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Kill", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Exec(ctx context.Context, in *ExecProcessRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Exec", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) ResizePty(ctx context.Context, in *ResizePtyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/ResizePty", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) CloseIO(ctx context.Context, in *CloseIORequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/CloseIO", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Pause(ctx context.Context, in *PauseTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Pause", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Resume(ctx context.Context, in *ResumeTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Resume", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) ListPids(ctx context.Context, in *ListPidsRequest, opts ...grpc.CallOption) (*ListPidsResponse, error) { + out := new(ListPidsResponse) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/ListPids", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Checkpoint(ctx context.Context, in *CheckpointTaskRequest, opts ...grpc.CallOption) (*CheckpointTaskResponse, error) { + out := new(CheckpointTaskResponse) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Checkpoint", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Update(ctx context.Context, in *UpdateTaskRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Update", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Metrics(ctx context.Context, in *MetricsRequest, opts ...grpc.CallOption) (*MetricsResponse, error) { + out := new(MetricsResponse) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Metrics", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *tasksClient) Wait(ctx context.Context, in *WaitRequest, opts ...grpc.CallOption) (*WaitResponse, error) { + out := new(WaitResponse) + err := c.cc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Wait", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// TasksServer is the server API for Tasks service. +// All implementations must embed UnimplementedTasksServer +// for forward compatibility +type TasksServer interface { + // Create a task. + Create(context.Context, *CreateTaskRequest) (*CreateTaskResponse, error) + // Start a process. + Start(context.Context, *StartRequest) (*StartResponse, error) + // Delete a task and on disk state. + Delete(context.Context, *DeleteTaskRequest) (*DeleteResponse, error) + DeleteProcess(context.Context, *DeleteProcessRequest) (*DeleteResponse, error) + Get(context.Context, *GetRequest) (*GetResponse, error) + List(context.Context, *ListTasksRequest) (*ListTasksResponse, error) + // Kill a task or process. + Kill(context.Context, *KillRequest) (*emptypb.Empty, error) + Exec(context.Context, *ExecProcessRequest) (*emptypb.Empty, error) + ResizePty(context.Context, *ResizePtyRequest) (*emptypb.Empty, error) + CloseIO(context.Context, *CloseIORequest) (*emptypb.Empty, error) + Pause(context.Context, *PauseTaskRequest) (*emptypb.Empty, error) + Resume(context.Context, *ResumeTaskRequest) (*emptypb.Empty, error) + ListPids(context.Context, *ListPidsRequest) (*ListPidsResponse, error) + Checkpoint(context.Context, *CheckpointTaskRequest) (*CheckpointTaskResponse, error) + Update(context.Context, *UpdateTaskRequest) (*emptypb.Empty, error) + Metrics(context.Context, *MetricsRequest) (*MetricsResponse, error) + Wait(context.Context, *WaitRequest) (*WaitResponse, error) + mustEmbedUnimplementedTasksServer() +} + +// UnimplementedTasksServer must be embedded to have forward compatible implementations. 
+type UnimplementedTasksServer struct { +} + +func (UnimplementedTasksServer) Create(context.Context, *CreateTaskRequest) (*CreateTaskResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Create not implemented") +} +func (UnimplementedTasksServer) Start(context.Context, *StartRequest) (*StartResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Start not implemented") +} +func (UnimplementedTasksServer) Delete(context.Context, *DeleteTaskRequest) (*DeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (UnimplementedTasksServer) DeleteProcess(context.Context, *DeleteProcessRequest) (*DeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteProcess not implemented") +} +func (UnimplementedTasksServer) Get(context.Context, *GetRequest) (*GetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (UnimplementedTasksServer) List(context.Context, *ListTasksRequest) (*ListTasksResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method List not implemented") +} +func (UnimplementedTasksServer) Kill(context.Context, *KillRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Kill not implemented") +} +func (UnimplementedTasksServer) Exec(context.Context, *ExecProcessRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Exec not implemented") +} +func (UnimplementedTasksServer) ResizePty(context.Context, *ResizePtyRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method ResizePty not implemented") +} +func (UnimplementedTasksServer) CloseIO(context.Context, *CloseIORequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method CloseIO not implemented") +} +func (UnimplementedTasksServer) Pause(context.Context, *PauseTaskRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Pause not implemented") +} +func (UnimplementedTasksServer) Resume(context.Context, *ResumeTaskRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Resume not implemented") +} +func (UnimplementedTasksServer) ListPids(context.Context, *ListPidsRequest) (*ListPidsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListPids not implemented") +} +func (UnimplementedTasksServer) Checkpoint(context.Context, *CheckpointTaskRequest) (*CheckpointTaskResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Checkpoint not implemented") +} +func (UnimplementedTasksServer) Update(context.Context, *UpdateTaskRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") +} +func (UnimplementedTasksServer) Metrics(context.Context, *MetricsRequest) (*MetricsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Metrics not implemented") +} +func (UnimplementedTasksServer) Wait(context.Context, *WaitRequest) (*WaitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Wait not implemented") +} +func (UnimplementedTasksServer) mustEmbedUnimplementedTasksServer() {} + +// UnsafeTasksServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to TasksServer will +// result in compilation errors. 
+type UnsafeTasksServer interface { + mustEmbedUnimplementedTasksServer() +} + +func RegisterTasksServer(s grpc.ServiceRegistrar, srv TasksServer) { + s.RegisterService(&Tasks_ServiceDesc, srv) +} + +func _Tasks_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Create(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Create", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Create(ctx, req.(*CreateTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Start_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StartRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Start(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Start", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Start(ctx, req.(*StartRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Delete(ctx, req.(*DeleteTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_DeleteProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteProcessRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).DeleteProcess(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/DeleteProcess", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).DeleteProcess(ctx, req.(*DeleteProcessRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Get(ctx, req.(*GetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListTasksRequest) + if err := dec(in); err != 
nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/List", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).List(ctx, req.(*ListTasksRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Kill_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(KillRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Kill(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Kill", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Kill(ctx, req.(*KillRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Exec_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExecProcessRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Exec(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Exec", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Exec(ctx, req.(*ExecProcessRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_ResizePty_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ResizePtyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).ResizePty(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/ResizePty", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).ResizePty(ctx, req.(*ResizePtyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_CloseIO_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CloseIORequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).CloseIO(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/CloseIO", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).CloseIO(ctx, req.(*CloseIORequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Pause_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PauseTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Pause(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Pause", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Pause(ctx, req.(*PauseTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_Tasks_Resume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ResumeTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Resume(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Resume", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Resume(ctx, req.(*ResumeTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_ListPids_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListPidsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).ListPids(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/ListPids", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).ListPids(ctx, req.(*ListPidsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Checkpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CheckpointTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Checkpoint(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Checkpoint", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Checkpoint(ctx, req.(*CheckpointTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Update(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Update", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Update(ctx, req.(*UpdateTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Metrics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MetricsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Metrics(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.tasks.v1.Tasks/Metrics", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Metrics(ctx, req.(*MetricsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Tasks_Wait_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WaitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TasksServer).Wait(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/containerd.services.tasks.v1.Tasks/Wait", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TasksServer).Wait(ctx, req.(*WaitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Tasks_ServiceDesc is the grpc.ServiceDesc for Tasks service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Tasks_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.tasks.v1.Tasks", + HandlerType: (*TasksServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Create", + Handler: _Tasks_Create_Handler, + }, + { + MethodName: "Start", + Handler: _Tasks_Start_Handler, + }, + { + MethodName: "Delete", + Handler: _Tasks_Delete_Handler, + }, + { + MethodName: "DeleteProcess", + Handler: _Tasks_DeleteProcess_Handler, + }, + { + MethodName: "Get", + Handler: _Tasks_Get_Handler, + }, + { + MethodName: "List", + Handler: _Tasks_List_Handler, + }, + { + MethodName: "Kill", + Handler: _Tasks_Kill_Handler, + }, + { + MethodName: "Exec", + Handler: _Tasks_Exec_Handler, + }, + { + MethodName: "ResizePty", + Handler: _Tasks_ResizePty_Handler, + }, + { + MethodName: "CloseIO", + Handler: _Tasks_CloseIO_Handler, + }, + { + MethodName: "Pause", + Handler: _Tasks_Pause_Handler, + }, + { + MethodName: "Resume", + Handler: _Tasks_Resume_Handler, + }, + { + MethodName: "ListPids", + Handler: _Tasks_ListPids_Handler, + }, + { + MethodName: "Checkpoint", + Handler: _Tasks_Checkpoint_Handler, + }, + { + MethodName: "Update", + Handler: _Tasks_Update_Handler, + }, + { + MethodName: "Metrics", + Handler: _Tasks_Metrics_Handler, + }, + { + MethodName: "Wait", + Handler: _Tasks_Wait_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/containerd/containerd/api/services/tasks/v1/tasks.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/services/transfer/v1/doc.go b/vendor/github.com/containerd/containerd/api/services/transfer/v1/doc.go new file mode 100644 index 0000000000000..0882a6e922b91 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/transfer/v1/doc.go @@ -0,0 +1,17 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package transfer diff --git a/vendor/github.com/containerd/containerd/api/services/transfer/v1/transfer.pb.go b/vendor/github.com/containerd/containerd/api/services/transfer/v1/transfer.pb.go new file mode 100644 index 0000000000000..b6e959babd562 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/transfer/v1/transfer.pb.go @@ -0,0 +1,274 @@ +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. 
+//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/transfer/v1/transfer.proto + +package transfer + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + emptypb "google.golang.org/protobuf/types/known/emptypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type TransferRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Source *anypb.Any `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"` + Destination *anypb.Any `protobuf:"bytes,2,opt,name=destination,proto3" json:"destination,omitempty"` + Options *TransferOptions `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"` +} + +func (x *TransferRequest) Reset() { + *x = TransferRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TransferRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransferRequest) ProtoMessage() {} + +func (x *TransferRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransferRequest.ProtoReflect.Descriptor instead. 
+func (*TransferRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDescGZIP(), []int{0} +} + +func (x *TransferRequest) GetSource() *anypb.Any { + if x != nil { + return x.Source + } + return nil +} + +func (x *TransferRequest) GetDestination() *anypb.Any { + if x != nil { + return x.Destination + } + return nil +} + +func (x *TransferRequest) GetOptions() *TransferOptions { + if x != nil { + return x.Options + } + return nil +} + +type TransferOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ProgressStream string `protobuf:"bytes,1,opt,name=progress_stream,json=progressStream,proto3" json:"progress_stream,omitempty"` // Progress min interval +} + +func (x *TransferOptions) Reset() { + *x = TransferOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TransferOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TransferOptions) ProtoMessage() {} + +func (x *TransferOptions) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TransferOptions.ProtoReflect.Descriptor instead. +func (*TransferOptions) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDescGZIP(), []int{1} +} + +func (x *TransferOptions) GetProgressStream() string { + if x != nil { + return x.ProgressStream + } + return "" +} + +var File_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDesc = []byte{ + 0x0a, 0x48, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x66, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1f, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, + 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0xc3, 0x01, 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x06, 0x73, + 
0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x36, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, + 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2e, 0x76, 0x31, + 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x3a, 0x0a, 0x0f, 0x54, 0x72, 0x61, + 0x6e, 0x73, 0x66, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, + 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x32, 0x60, 0x0a, 0x08, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, + 0x72, 0x12, 0x54, 0x0a, 0x08, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x12, 0x30, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, + 0x54, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x44, 0x5a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, + 0x72, 0x2f, 0x76, 0x31, 0x3b, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDescData = file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDesc +) + +func file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_goTypes = []interface{}{ + (*TransferRequest)(nil), // 0: containerd.services.transfer.v1.TransferRequest + (*TransferOptions)(nil), // 1: containerd.services.transfer.v1.TransferOptions + (*anypb.Any)(nil), // 2: 
google.protobuf.Any + (*emptypb.Empty)(nil), // 3: google.protobuf.Empty +} +var file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_depIdxs = []int32{ + 2, // 0: containerd.services.transfer.v1.TransferRequest.source:type_name -> google.protobuf.Any + 2, // 1: containerd.services.transfer.v1.TransferRequest.destination:type_name -> google.protobuf.Any + 1, // 2: containerd.services.transfer.v1.TransferRequest.options:type_name -> containerd.services.transfer.v1.TransferOptions + 0, // 3: containerd.services.transfer.v1.Transfer.Transfer:input_type -> containerd.services.transfer.v1.TransferRequest + 3, // 4: containerd.services.transfer.v1.Transfer.Transfer:output_type -> google.protobuf.Empty + 4, // [4:5] is the sub-list for method output_type + 3, // [3:4] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_init() } +func file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_init() { + if File_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TransferRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TransferOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto = out.File + file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_rawDesc = nil + file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_goTypes = nil + file_github_com_containerd_containerd_api_services_transfer_v1_transfer_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/services/transfer/v1/transfer.proto b/vendor/github.com/containerd/containerd/api/services/transfer/v1/transfer.proto new file mode 100644 index 0000000000000..a8f25ee593ef2 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/transfer/v1/transfer.proto @@ -0,0 +1,39 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +syntax = "proto3"; + +package containerd.services.transfer.v1; + +import "google/protobuf/any.proto"; +import "google/protobuf/empty.proto"; + +option go_package = "github.com/containerd/containerd/api/services/transfer/v1;transfer"; + +service Transfer { + rpc Transfer(TransferRequest) returns (google.protobuf.Empty); +} + +message TransferRequest { + google.protobuf.Any source = 1; + google.protobuf.Any destination = 2; + TransferOptions options = 3; +} + +message TransferOptions { + string progress_stream = 1; + // Progress min interval +} diff --git a/vendor/github.com/containerd/containerd/api/services/transfer/v1/transfer_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/transfer/v1/transfer_grpc.pb.go new file mode 100644 index 0000000000000..cf108744bd62d --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/transfer/v1/transfer_grpc.pb.go @@ -0,0 +1,106 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/transfer/v1/transfer.proto + +package transfer + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// TransferClient is the client API for Transfer service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type TransferClient interface { + Transfer(ctx context.Context, in *TransferRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) +} + +type transferClient struct { + cc grpc.ClientConnInterface +} + +func NewTransferClient(cc grpc.ClientConnInterface) TransferClient { + return &transferClient{cc} +} + +func (c *transferClient) Transfer(ctx context.Context, in *TransferRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/containerd.services.transfer.v1.Transfer/Transfer", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// TransferServer is the server API for Transfer service. +// All implementations must embed UnimplementedTransferServer +// for forward compatibility +type TransferServer interface { + Transfer(context.Context, *TransferRequest) (*emptypb.Empty, error) + mustEmbedUnimplementedTransferServer() +} + +// UnimplementedTransferServer must be embedded to have forward compatible implementations. 
+type UnimplementedTransferServer struct { +} + +func (UnimplementedTransferServer) Transfer(context.Context, *TransferRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Transfer not implemented") +} +func (UnimplementedTransferServer) mustEmbedUnimplementedTransferServer() {} + +// UnsafeTransferServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to TransferServer will +// result in compilation errors. +type UnsafeTransferServer interface { + mustEmbedUnimplementedTransferServer() +} + +func RegisterTransferServer(s grpc.ServiceRegistrar, srv TransferServer) { + s.RegisterService(&Transfer_ServiceDesc, srv) +} + +func _Transfer_Transfer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(TransferRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TransferServer).Transfer(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.transfer.v1.Transfer/Transfer", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TransferServer).Transfer(ctx, req.(*TransferRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Transfer_ServiceDesc is the grpc.ServiceDesc for Transfer service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Transfer_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.transfer.v1.Transfer", + HandlerType: (*TransferServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Transfer", + Handler: _Transfer_Transfer_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/containerd/containerd/api/services/transfer/v1/transfer.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.pb.go b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.pb.go index b1f275bf0de83..221b183f7cef2 100644 --- a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.pb.go @@ -1,760 +1,292 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto package events import ( - context "context" - fmt "fmt" - github_com_containerd_ttrpc "github.com/containerd/ttrpc" - github_com_containerd_typeurl "github.com/containerd/typeurl" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types "github.com/gogo/protobuf/types" - io "io" - math "math" - math_bits "math/bits" + _ "github.com/containerd/containerd/protobuf/plugin" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + emptypb "google.golang.org/protobuf/types/known/emptypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - strings "strings" - time "time" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type ForwardRequest struct { - Envelope *Envelope `protobuf:"bytes,1,opt,name=envelope,proto3" json:"envelope,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *ForwardRequest) Reset() { *m = ForwardRequest{} } -func (*ForwardRequest) ProtoMessage() {} -func (*ForwardRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_19f98672016720b5, []int{0} -} -func (m *ForwardRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ForwardRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ForwardRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ForwardRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ForwardRequest.Merge(m, src) + Envelope *Envelope `protobuf:"bytes,1,opt,name=envelope,proto3" json:"envelope,omitempty"` } -func (m *ForwardRequest) XXX_Size() int { - return m.Size() -} -func (m *ForwardRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ForwardRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ForwardRequest proto.InternalMessageInfo -type Envelope struct { - Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"timestamp"` - Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` - Topic string `protobuf:"bytes,3,opt,name=topic,proto3" json:"topic,omitempty"` - Event *types.Any `protobuf:"bytes,4,opt,name=event,proto3" json:"event,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 
`json:"-"` -} - -func (m *Envelope) Reset() { *m = Envelope{} } -func (*Envelope) ProtoMessage() {} -func (*Envelope) Descriptor() ([]byte, []int) { - return fileDescriptor_19f98672016720b5, []int{1} -} -func (m *Envelope) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Envelope) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Envelope.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *ForwardRequest) Reset() { + *x = ForwardRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *Envelope) XXX_Merge(src proto.Message) { - xxx_messageInfo_Envelope.Merge(m, src) -} -func (m *Envelope) XXX_Size() int { - return m.Size() -} -func (m *Envelope) XXX_DiscardUnknown() { - xxx_messageInfo_Envelope.DiscardUnknown(m) -} - -var xxx_messageInfo_Envelope proto.InternalMessageInfo - -func init() { - proto.RegisterType((*ForwardRequest)(nil), "containerd.services.events.ttrpc.v1.ForwardRequest") - proto.RegisterType((*Envelope)(nil), "containerd.services.events.ttrpc.v1.Envelope") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto", fileDescriptor_19f98672016720b5) -} -var fileDescriptor_19f98672016720b5 = []byte{ - // 396 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x52, 0xc1, 0x8e, 0xd3, 0x30, - 0x10, 0x8d, 0x61, 0x77, 0x69, 0x8d, 0xc4, 0xc1, 0xaa, 0x50, 0x08, 0x28, 0x59, 0x2d, 0x97, 0x15, - 0x12, 0xb6, 0x76, 0xf7, 0x06, 0x17, 0xa8, 0x28, 0x12, 0x1c, 0x23, 0x84, 0x2a, 0x90, 0x10, 0x6e, - 0x3a, 0x4d, 0x2d, 0x25, 0xb6, 0x49, 0x9c, 0xa0, 0xde, 0xfa, 0x09, 0x7c, 0x0c, 0x17, 0xfe, 0xa0, - 0x47, 0x8e, 0x9c, 0x80, 0xe6, 0x4b, 0x50, 0x9d, 0xa4, 0x81, 0xf6, 0x40, 0xa5, 0xbd, 0xbd, 0xcc, - 0x7b, 0x6f, 0xde, 0xcc, 0xc4, 0xf8, 0x75, 0x2c, 0xcc, 0xbc, 0x98, 0xd0, 0x48, 0xa5, 0x2c, 0x52, - 0xd2, 0x70, 0x21, 0x21, 0x9b, 0xfe, 0x0d, 0xb9, 0x16, 0x2c, 0x87, 0xac, 0x14, 0x11, 0xe4, 0xcc, - 0x98, 0x4c, 0x47, 0x0c, 0x4a, 0x90, 0x26, 0x67, 0xe5, 0x45, 0x83, 0xa8, 0xce, 0x94, 0x51, 0xe4, - 0x61, 0xe7, 0xa2, 0xad, 0x83, 0x36, 0x0a, 0x6b, 0xa4, 0xe5, 0x85, 0xf7, 0xec, 0xbf, 0x81, 0xb6, - 0xd9, 0xa4, 0x98, 0x31, 0x9d, 0x14, 0xb1, 0x90, 0x6c, 0x26, 0x20, 0x99, 0x6a, 0x6e, 0xe6, 0x75, - 0x8c, 0x37, 0x88, 0x55, 0xac, 0x2c, 0x64, 0x1b, 0xd4, 0x54, 0xef, 0xc5, 0x4a, 0xc5, 0x09, 0x74, - 0x6e, 0x2e, 0x17, 0x0d, 0x75, 0x7f, 0x97, 0x82, 0x54, 0x9b, 0x96, 0x0c, 0x76, 0x49, 0x23, 0x52, - 0xc8, 0x0d, 0x4f, 0x75, 0x2d, 0x38, 0x7b, 0x8f, 0xef, 0xbc, 0x54, 0xd9, 0x67, 0x9e, 0x4d, 0x43, - 0xf8, 0x54, 0x40, 0x6e, 0xc8, 0x2b, 0xdc, 0x03, 0x59, 0x42, 0xa2, 0x34, 0xb8, 0xe8, 0x14, 0x9d, - 0xdf, 0xbe, 0x7c, 0x4c, 0x0f, 0x58, 0x9d, 0x8e, 0x1a, 0x53, 0xb8, 0xb5, 0x9f, 0x7d, 0x45, 0xb8, - 0xd7, 0x96, 0xc9, 0x10, 0xf7, 0xb7, 0xe1, 0x4d, 0x63, 0x8f, 0xd6, 0xe3, 0xd1, 0x76, 0x3c, 0xfa, - 0xa6, 0x55, 0x0c, 0x7b, 0xab, 0x9f, 0x81, 0xf3, 0xe5, 0x57, 0x80, 0xc2, 0xce, 0x46, 0x1e, 0xe0, - 0xbe, 0xe4, 0x29, 0xe4, 0x9a, 0x47, 0xe0, 0xde, 0x38, 0x45, 0xe7, 0xfd, 0xb0, 0x2b, 0x90, 0x01, - 0x3e, 0x36, 0x4a, 0x8b, 0xc8, 0xbd, 0x69, 0x99, 0xfa, 0x83, 0x3c, 0xc2, 0xc7, 0x76, 0x54, 0xf7, - 0xc8, 0x66, 0x0e, 0xf6, 0x32, 0x9f, 0xcb, 0x45, 0x58, 0x4b, 0x9e, 0x1c, 0x2d, 0xbf, 
0x05, 0xe8, - 0xf2, 0x23, 0x3e, 0x19, 0xd9, 0xe5, 0xc8, 0x5b, 0x7c, 0xab, 0xb9, 0x0e, 0xb9, 0x3a, 0xe8, 0x08, - 0xff, 0xde, 0xd2, 0xbb, 0xbb, 0x17, 0x36, 0xda, 0xfc, 0x9c, 0xe1, 0x87, 0xd5, 0xda, 0x77, 0x7e, - 0xac, 0x7d, 0x67, 0x59, 0xf9, 0x68, 0x55, 0xf9, 0xe8, 0x7b, 0xe5, 0xa3, 0xdf, 0x95, 0x8f, 0xde, - 0xbd, 0xb8, 0xd6, 0x8b, 0x7d, 0x5a, 0xa3, 0xb1, 0x33, 0x46, 0x93, 0x13, 0x9b, 0x79, 0xf5, 0x27, - 0x00, 0x00, 0xff, 0xff, 0xd4, 0x90, 0xbd, 0x09, 0x04, 0x03, 0x00, 0x00, +func (x *ForwardRequest) String() string { + return protoimpl.X.MessageStringOf(x) } -// Field returns the value for the given fieldpath as a string, if defined. -// If the value is not defined, the second value will be false. -func (m *Envelope) Field(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - // unhandled: timestamp - case "namespace": - return string(m.Namespace), len(m.Namespace) > 0 - case "topic": - return string(m.Topic), len(m.Topic) > 0 - case "event": - decoded, err := github_com_containerd_typeurl.UnmarshalAny(m.Event) - if err != nil { - return "", false - } +func (*ForwardRequest) ProtoMessage() {} - adaptor, ok := decoded.(interface{ Field([]string) (string, bool) }) - if !ok { - return "", false +func (x *ForwardRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return adaptor.Field(fieldpath[1:]) + return ms } - return "", false -} -func (m *ForwardRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *ForwardRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use ForwardRequest.ProtoReflect.Descriptor instead. 
+func (*ForwardRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDescGZIP(), []int{0} } -func (m *ForwardRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) +func (x *ForwardRequest) GetEnvelope() *Envelope { + if x != nil { + return x.Envelope } - if m.Envelope != nil { - { - size, err := m.Envelope.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintEvents(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return nil } -func (m *Envelope) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} +type Envelope struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Envelope) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Topic string `protobuf:"bytes,3,opt,name=topic,proto3" json:"topic,omitempty"` + Event *anypb.Any `protobuf:"bytes,4,opt,name=event,proto3" json:"event,omitempty"` } -func (m *Envelope) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Event != nil { - { - size, err := m.Event.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintEvents(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 +func (x *Envelope) Reset() { + *x = Envelope{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - if len(m.Topic) > 0 { - i -= len(m.Topic) - copy(dAtA[i:], m.Topic) - i = encodeVarintEvents(dAtA, i, uint64(len(m.Topic))) - i-- - dAtA[i] = 0x1a - } - if len(m.Namespace) > 0 { - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintEvents(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x12 - } - n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err3 != nil { - return 0, err3 - } - i -= n3 - i = encodeVarintEvents(dAtA, i, uint64(n3)) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil } -func encodeVarintEvents(dAtA []byte, offset int, v uint64) int { - offset -= sovEvents(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ForwardRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Envelope != nil { - l = m.Envelope.Size() - n += 1 + l + sovEvents(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n +func (x *Envelope) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Envelope) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = 
github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) - n += 1 + l + sovEvents(uint64(l)) - l = len(m.Namespace) - if l > 0 { - n += 1 + l + sovEvents(uint64(l)) - } - l = len(m.Topic) - if l > 0 { - n += 1 + l + sovEvents(uint64(l)) - } - if m.Event != nil { - l = m.Event.Size() - n += 1 + l + sovEvents(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} +func (*Envelope) ProtoMessage() {} -func sovEvents(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozEvents(x uint64) (n int) { - return sovEvents(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ForwardRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ForwardRequest{`, - `Envelope:` + strings.Replace(this.Envelope.String(), "Envelope", "Envelope", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *Envelope) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Envelope{`, - `Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Topic:` + fmt.Sprintf("%v", this.Topic) + `,`, - `Event:` + strings.Replace(fmt.Sprintf("%v", this.Event), "Any", "types.Any", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringEvents(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" +func (x *Envelope) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return mi.MessageOf(x) } -type EventsService interface { - Forward(ctx context.Context, req *ForwardRequest) (*types.Empty, error) -} - -func RegisterEventsService(srv *github_com_containerd_ttrpc.Server, svc EventsService) { - srv.Register("containerd.services.events.ttrpc.v1.Events", map[string]github_com_containerd_ttrpc.Method{ - "Forward": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req ForwardRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Forward(ctx, &req) - }, - }) +// Deprecated: Use Envelope.ProtoReflect.Descriptor instead. 
+func (*Envelope) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDescGZIP(), []int{1} } -type eventsClient struct { - client *github_com_containerd_ttrpc.Client +func (x *Envelope) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp + } + return nil } -func NewEventsClient(client *github_com_containerd_ttrpc.Client) EventsService { - return &eventsClient{ - client: client, +func (x *Envelope) GetNamespace() string { + if x != nil { + return x.Namespace } + return "" } -func (c *eventsClient) Forward(ctx context.Context, req *ForwardRequest) (*types.Empty, error) { - var resp types.Empty - if err := c.client.Call(ctx, "containerd.services.events.ttrpc.v1.Events", "Forward", req, &resp); err != nil { - return nil, err +func (x *Envelope) GetTopic() string { + if x != nil { + return x.Topic } - return &resp, nil + return "" } -func (m *ForwardRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ForwardRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ForwardRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Envelope", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvents - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvents - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Envelope == nil { - m.Envelope = &Envelope{} - } - if err := m.Envelope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipEvents(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEvents - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *Envelope) GetEvent() *anypb.Any { + if x != nil { + return x.Event } return nil } -func (m *Envelope) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Envelope: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Envelope: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvents - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvents - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEvents - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEvents - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthEvents - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthEvents - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Topic = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEvents - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEvents - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthEvents - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Event == nil { - m.Event = &types.Any{} - } - if err := 
m.Event.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipEvents(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEvents - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil +var File_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDesc = []byte{ + 0x0a, 0x4a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x74, 0x74, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x76, 0x31, 0x2f, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x23, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x74, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x76, + 0x31, 0x1a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x5b, 0x0a, 0x0e, + 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, + 0x0a, 0x08, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2d, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x74, 0x74, + 0x72, 0x70, 0x63, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x52, + 0x08, 0x65, 0x6e, 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x22, 0xaa, 0x01, 0x0a, 0x08, 0x45, 0x6e, + 0x76, 0x65, 0x6c, 0x6f, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 
0x12, 0x14, + 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, + 0x6f, 0x70, 0x69, 0x63, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x3a, 0x04, 0x80, 0xb9, 0x1f, 0x01, 0x32, 0x60, 0x0a, 0x06, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x12, 0x56, 0x0a, 0x07, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x12, 0x33, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x73, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x74, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x76, + 0x31, 0x2e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x46, 0x5a, 0x44, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x74, 0x74, 0x72, 0x70, 0x63, 0x2f, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2f, 0x76, 0x31, 0x3b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func skipEvents(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEvents - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + +var ( + file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDescData = file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDesc +) + +func file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_goTypes = []interface{}{ + (*ForwardRequest)(nil), // 0: containerd.services.events.ttrpc.v1.ForwardRequest + (*Envelope)(nil), // 1: containerd.services.events.ttrpc.v1.Envelope + (*timestamppb.Timestamp)(nil), // 2: google.protobuf.Timestamp + (*anypb.Any)(nil), // 3: google.protobuf.Any + (*emptypb.Empty)(nil), // 4: google.protobuf.Empty +} +var file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_depIdxs = []int32{ + 1, // 0: containerd.services.events.ttrpc.v1.ForwardRequest.envelope:type_name -> containerd.services.events.ttrpc.v1.Envelope + 2, // 1: 
containerd.services.events.ttrpc.v1.Envelope.timestamp:type_name -> google.protobuf.Timestamp + 3, // 2: containerd.services.events.ttrpc.v1.Envelope.event:type_name -> google.protobuf.Any + 0, // 3: containerd.services.events.ttrpc.v1.Events.Forward:input_type -> containerd.services.events.ttrpc.v1.ForwardRequest + 4, // 4: containerd.services.events.ttrpc.v1.Events.Forward:output_type -> google.protobuf.Empty + 4, // [4:5] is the sub-list for method output_type + 3, // [3:4] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_init() } +func file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_init() { + if File_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ForwardRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEvents - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } + file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Envelope); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEvents - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthEvents - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupEvents - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthEvents - } - if depth == 0 { - return iNdEx, nil } } - return 0, io.ErrUnexpectedEOF + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto = out.File + file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_rawDesc = nil + file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_goTypes = nil + 
file_github_com_containerd_containerd_api_services_ttrpc_events_v1_events_proto_depIdxs = nil } - -var ( - ErrInvalidLengthEvents = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowEvents = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupEvents = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto index ade1c7abefd53..e0c2f232d3648 100644 --- a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto +++ b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto @@ -18,8 +18,7 @@ syntax = "proto3"; package containerd.services.events.ttrpc.v1; -import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; -import weak "gogoproto/gogo.proto"; +import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto"; import "google/protobuf/any.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/timestamp.proto"; @@ -41,7 +40,7 @@ message ForwardRequest { message Envelope { option (containerd.plugin.fieldpath) = true; - google.protobuf.Timestamp timestamp = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp timestamp = 1; string namespace = 2; string topic = 3; google.protobuf.Any event = 4; diff --git a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events_fieldpath.pb.go b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events_fieldpath.pb.go new file mode 100644 index 0000000000000..ad48127be919c --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events_fieldpath.pb.go @@ -0,0 +1,55 @@ +// Code generated by protoc-gen-go-fieldpath. DO NOT EDIT. +// source: github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto +package events + +import ( + v2 "github.com/containerd/typeurl/v2" +) + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. +func (m *ForwardRequest) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "envelope": + // NOTE(stevvooe): This is probably not correct in many cases. + // We assume that the target message also implements the Field + // method, which isn't likely true in a lot of cases. + // + // If you have a broken build and have found this comment, + // you may be closer to a solution. + if m.Envelope == nil { + return "", false + } + return m.Envelope.Field(fieldpath[1:]) + } + return "", false +} + +// Field returns the value for the given fieldpath as a string, if defined. +// If the value is not defined, the second value will be false. 
+func (m *Envelope) Field(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + // unhandled: timestamp + case "namespace": + return string(m.Namespace), len(m.Namespace) > 0 + case "topic": + return string(m.Topic), len(m.Topic) > 0 + case "event": + decoded, err := v2.UnmarshalAny(m.Event) + if err != nil { + return "", false + } + adaptor, ok := decoded.(interface{ Field([]string) (string, bool) }) + if !ok { + return "", false + } + return adaptor.Field(fieldpath[1:]) + } + return "", false +} diff --git a/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events_ttrpc.pb.go b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events_ttrpc.pb.go new file mode 100644 index 0000000000000..8828c6cbcc215 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/ttrpc/events/v1/events_ttrpc.pb.go @@ -0,0 +1,45 @@ +// Code generated by protoc-gen-go-ttrpc. DO NOT EDIT. +// source: github.com/containerd/containerd/api/services/ttrpc/events/v1/events.proto +package events + +import ( + context "context" + ttrpc "github.com/containerd/ttrpc" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +type EventsService interface { + Forward(context.Context, *ForwardRequest) (*emptypb.Empty, error) +} + +func RegisterEventsService(srv *ttrpc.Server, svc EventsService) { + srv.RegisterService("containerd.services.events.ttrpc.v1.Events", &ttrpc.ServiceDesc{ + Methods: map[string]ttrpc.Method{ + "Forward": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { + var req ForwardRequest + if err := unmarshal(&req); err != nil { + return nil, err + } + return svc.Forward(ctx, &req) + }, + }, + }) +} + +type eventsClient struct { + client *ttrpc.Client +} + +func NewEventsClient(client *ttrpc.Client) EventsService { + return &eventsClient{ + client: client, + } +} + +func (c *eventsClient) Forward(ctx context.Context, req *ForwardRequest) (*emptypb.Empty, error) { + var resp emptypb.Empty + if err := c.client.Call(ctx, "containerd.services.events.ttrpc.v1.Events", "Forward", req, &resp); err != nil { + return nil, err + } + return &resp, nil +} diff --git a/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go b/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go index b742c6ae62f57..c087d3e26be18 100644 --- a/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go @@ -1,476 +1,187 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/services/version/v1/version.proto package version import ( - context "context" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - types "github.com/gogo/protobuf/types" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" reflect "reflect" - strings "strings" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type VersionResponse struct { - Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` - Revision string `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VersionResponse) Reset() { *m = VersionResponse{} } -func (*VersionResponse) ProtoMessage() {} -func (*VersionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_128109001e578ffe, []int{0} -} -func (m *VersionResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VersionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_VersionResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *VersionResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_VersionResponse.Merge(m, src) -} -func (m *VersionResponse) XXX_Size() int { - return m.Size() -} -func (m *VersionResponse) XXX_DiscardUnknown() { - xxx_messageInfo_VersionResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_VersionResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*VersionResponse)(nil), "containerd.services.version.v1.VersionResponse") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/version/v1/version.proto", fileDescriptor_128109001e578ffe) -} - -var fileDescriptor_128109001e578ffe = []byte{ - // 243 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4b, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, - 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb, - 0x97, 0xa5, 0x16, 0x15, 0x67, 0xe6, 0xe7, 0xe9, 0x97, 0x19, 0xc2, 0x98, 0x7a, 0x05, 0x45, 0xf9, - 0x25, 0xf9, 0x42, 0x72, 0x08, 0x1d, 0x7a, 0x30, 0xd5, 0x7a, 0x30, 0x25, 0x65, 0x86, 0x52, 0xd2, - 0xe9, 0xf9, 0xf9, 
0xe9, 0x39, 0xa9, 0xfa, 0x60, 0xd5, 0x49, 0xa5, 0x69, 0xfa, 0xa9, 0xb9, 0x05, - 0x25, 0x95, 0x10, 0xcd, 0x52, 0x22, 0xe9, 0xf9, 0xe9, 0xf9, 0x60, 0xa6, 0x3e, 0x88, 0x05, 0x11, - 0x55, 0x72, 0xe7, 0xe2, 0x0f, 0x83, 0x18, 0x10, 0x94, 0x5a, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a, - 0x24, 0xc1, 0xc5, 0x0e, 0x35, 0x53, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, 0xc6, 0x15, 0x92, - 0xe2, 0xe2, 0x28, 0x4a, 0x2d, 0xcb, 0x04, 0x4b, 0x31, 0x81, 0xa5, 0xe0, 0x7c, 0xa3, 0x58, 0x2e, - 0x76, 0xa8, 0x41, 0x42, 0x41, 0x08, 0xa6, 0x98, 0x1e, 0xc4, 0x49, 0x7a, 0x30, 0x27, 0xe9, 0xb9, - 0x82, 0x9c, 0x24, 0xa5, 0xaf, 0x87, 0xdf, 0x2b, 0x7a, 0x68, 0x8e, 0x72, 0x8a, 0x3a, 0xf1, 0x50, - 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, - 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x03, 0xb9, 0x81, 0x6b, 0x0d, 0x65, 0x46, 0x30, - 0x26, 0xb1, 0x81, 0x9d, 0x67, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x95, 0x0d, 0x52, 0x23, 0xa9, - 0x01, 0x00, 0x00, -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// VersionClient is the client API for Version service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type VersionClient interface { - Version(ctx context.Context, in *types.Empty, opts ...grpc.CallOption) (*VersionResponse, error) -} - -type versionClient struct { - cc *grpc.ClientConn -} - -func NewVersionClient(cc *grpc.ClientConn) VersionClient { - return &versionClient{cc} + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + Revision string `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"` } -func (c *versionClient) Version(ctx context.Context, in *types.Empty, opts ...grpc.CallOption) (*VersionResponse, error) { - out := new(VersionResponse) - err := c.cc.Invoke(ctx, "/containerd.services.version.v1.Version/Version", in, out, opts...) - if err != nil { - return nil, err +func (x *VersionResponse) Reset() { + *x = VersionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_services_version_v1_version_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return out, nil } -// VersionServer is the server API for Version service. -type VersionServer interface { - Version(context.Context, *types.Empty) (*VersionResponse, error) +func (x *VersionResponse) String() string { + return protoimpl.X.MessageStringOf(x) } -// UnimplementedVersionServer can be embedded to have forward compatible implementations. 
-type UnimplementedVersionServer struct { -} - -func (*UnimplementedVersionServer) Version(ctx context.Context, req *types.Empty) (*VersionResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Version not implemented") -} - -func RegisterVersionServer(s *grpc.Server, srv VersionServer) { - s.RegisterService(&_Version_serviceDesc, srv) -} +func (*VersionResponse) ProtoMessage() {} -func _Version_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(types.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(VersionServer).Version(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.version.v1.Version/Version", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VersionServer).Version(ctx, req.(*types.Empty)) +func (x *VersionResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_services_version_v1_version_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return interceptor(ctx, in, info, handler) + return mi.MessageOf(x) } -var _Version_serviceDesc = grpc.ServiceDesc{ - ServiceName: "containerd.services.version.v1.Version", - HandlerType: (*VersionServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Version", - Handler: _Version_Version_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "github.com/containerd/containerd/api/services/version/v1/version.proto", +// Deprecated: Use VersionResponse.ProtoReflect.Descriptor instead. 
+func (*VersionResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDescGZIP(), []int{0} } -func (m *VersionResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *VersionResponse) GetVersion() string { + if x != nil { + return x.Version } - return dAtA[:n], nil + return "" } -func (m *VersionResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VersionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Revision) > 0 { - i -= len(m.Revision) - copy(dAtA[i:], m.Revision) - i = encodeVarintVersion(dAtA, i, uint64(len(m.Revision))) - i-- - dAtA[i] = 0x12 +func (x *VersionResponse) GetRevision() string { + if x != nil { + return x.Revision } - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintVersion(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return "" } -func encodeVarintVersion(dAtA []byte, offset int, v uint64) int { - offset -= sovVersion(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *VersionResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Version) - if l > 0 { - n += 1 + l + sovVersion(uint64(l)) - } - l = len(m.Revision) - if l > 0 { - n += 1 + l + sovVersion(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} +var File_github_com_containerd_containerd_api_services_version_v1_version_proto protoreflect.FileDescriptor -func sovVersion(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozVersion(x uint64) (n int) { - return sovVersion(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +var file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDesc = []byte{ + 0x0a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2e, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x47, 0x0a, 0x0f, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x32, 0x5d, + 0x0a, 0x07, 0x56, 0x65, 
0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x52, 0x0a, 0x07, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x2f, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x73, 0x2e, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x42, 0x5a, + 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x3b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func (this *VersionResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VersionResponse{`, - `Version:` + fmt.Sprintf("%v", this.Version) + `,`, - `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringVersion(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *VersionResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowVersion - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VersionResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VersionResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowVersion - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthVersion - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthVersion - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowVersion - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthVersion - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthVersion - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - 
m.Revision = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipVersion(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthVersion - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipVersion(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowVersion - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowVersion - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowVersion - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthVersion - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupVersion +var ( + file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDescData = file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDesc +) + +func file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_services_version_v1_version_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_github_com_containerd_containerd_api_services_version_v1_version_proto_goTypes = []interface{}{ + (*VersionResponse)(nil), // 0: containerd.services.version.v1.VersionResponse + (*emptypb.Empty)(nil), // 1: google.protobuf.Empty +} +var file_github_com_containerd_containerd_api_services_version_v1_version_proto_depIdxs = []int32{ + 1, // 0: containerd.services.version.v1.Version.Version:input_type -> google.protobuf.Empty + 0, // 1: containerd.services.version.v1.Version.Version:output_type -> containerd.services.version.v1.VersionResponse + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_services_version_v1_version_proto_init() } +func file_github_com_containerd_containerd_api_services_version_v1_version_proto_init() { + if 
File_github_com_containerd_containerd_api_services_version_v1_version_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_services_version_v1_version_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VersionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthVersion - } - if depth == 0 { - return iNdEx, nil } } - return 0, io.ErrUnexpectedEOF + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_github_com_containerd_containerd_api_services_version_v1_version_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_services_version_v1_version_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_services_version_v1_version_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_services_version_v1_version_proto = out.File + file_github_com_containerd_containerd_api_services_version_v1_version_proto_rawDesc = nil + file_github_com_containerd_containerd_api_services_version_v1_version_proto_goTypes = nil + file_github_com_containerd_containerd_api_services_version_v1_version_proto_depIdxs = nil } - -var ( - ErrInvalidLengthVersion = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowVersion = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupVersion = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/containerd/containerd/api/services/version/v1/version.proto b/vendor/github.com/containerd/containerd/api/services/version/v1/version.proto index 97681bb86ee79..bd948ff343b3b 100644 --- a/vendor/github.com/containerd/containerd/api/services/version/v1/version.proto +++ b/vendor/github.com/containerd/containerd/api/services/version/v1/version.proto @@ -19,7 +19,6 @@ syntax = "proto3"; package containerd.services.version.v1; import "google/protobuf/empty.proto"; -import weak "gogoproto/gogo.proto"; // TODO(stevvooe): Should version service actually be versioned? option go_package = "github.com/containerd/containerd/api/services/version/v1;version"; diff --git a/vendor/github.com/containerd/containerd/api/services/version/v1/version_grpc.pb.go b/vendor/github.com/containerd/containerd/api/services/version/v1/version_grpc.pb.go new file mode 100644 index 0000000000000..4070fd8347504 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/services/version/v1/version_grpc.pb.go @@ -0,0 +1,106 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.20.1 +// source: github.com/containerd/containerd/api/services/version/v1/version.proto + +package version + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// VersionClient is the client API for Version service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type VersionClient interface { + Version(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*VersionResponse, error) +} + +type versionClient struct { + cc grpc.ClientConnInterface +} + +func NewVersionClient(cc grpc.ClientConnInterface) VersionClient { + return &versionClient{cc} +} + +func (c *versionClient) Version(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*VersionResponse, error) { + out := new(VersionResponse) + err := c.cc.Invoke(ctx, "/containerd.services.version.v1.Version/Version", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// VersionServer is the server API for Version service. +// All implementations must embed UnimplementedVersionServer +// for forward compatibility +type VersionServer interface { + Version(context.Context, *emptypb.Empty) (*VersionResponse, error) + mustEmbedUnimplementedVersionServer() +} + +// UnimplementedVersionServer must be embedded to have forward compatible implementations. +type UnimplementedVersionServer struct { +} + +func (UnimplementedVersionServer) Version(context.Context, *emptypb.Empty) (*VersionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Version not implemented") +} +func (UnimplementedVersionServer) mustEmbedUnimplementedVersionServer() {} + +// UnsafeVersionServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to VersionServer will +// result in compilation errors. +type UnsafeVersionServer interface { + mustEmbedUnimplementedVersionServer() +} + +func RegisterVersionServer(s grpc.ServiceRegistrar, srv VersionServer) { + s.RegisterService(&Version_ServiceDesc, srv) +} + +func _Version_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VersionServer).Version(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.version.v1.Version/Version", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VersionServer).Version(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +// Version_ServiceDesc is the grpc.ServiceDesc for Version service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Version_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "containerd.services.version.v1.Version", + HandlerType: (*VersionServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Version", + Handler: _Version_Version_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/containerd/containerd/api/services/version/v1/version.proto", +} diff --git a/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go b/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go index fe71dbf433000..f3db1c52d9608 100644 --- a/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go +++ b/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go @@ -1,30 +1,39 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/types/descriptor.proto package types import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - strings "strings" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // Descriptor describes a blob in a content store. // @@ -32,567 +41,166 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // oci descriptor found in a manifest. 
// See https://godoc.org/github.com/opencontainers/image-spec/specs-go/v1#Descriptor type Descriptor struct { - MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` - Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` - Size_ int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` - Annotations map[string]string `protobuf:"bytes,5,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Descriptor) Reset() { *m = Descriptor{} } -func (*Descriptor) ProtoMessage() {} -func (*Descriptor) Descriptor() ([]byte, []int) { - return fileDescriptor_37f958df3707db9e, []int{0} + MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` + Digest string `protobuf:"bytes,2,opt,name=digest,proto3" json:"digest,omitempty"` + Size int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` + Annotations map[string]string `protobuf:"bytes,5,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } -func (m *Descriptor) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Descriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Descriptor.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Descriptor) XXX_Merge(src proto.Message) { - xxx_messageInfo_Descriptor.Merge(m, src) -} -func (m *Descriptor) XXX_Size() int { - return m.Size() -} -func (m *Descriptor) XXX_DiscardUnknown() { - xxx_messageInfo_Descriptor.DiscardUnknown(m) -} - -var xxx_messageInfo_Descriptor proto.InternalMessageInfo -func init() { - proto.RegisterType((*Descriptor)(nil), "containerd.types.Descriptor") - proto.RegisterMapType((map[string]string)(nil), "containerd.types.Descriptor.AnnotationsEntry") +func (x *Descriptor) Reset() { + *x = Descriptor{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_descriptor_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/types/descriptor.proto", fileDescriptor_37f958df3707db9e) +func (x *Descriptor) String() string { + return protoimpl.X.MessageStringOf(x) } -var fileDescriptor_37f958df3707db9e = []byte{ - // 311 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, - 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xa7, 0xa4, 0x16, - 0x27, 0x17, 0x65, 0x16, 0x94, 0xe4, 0x17, 0xe9, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, - 0x94, 0xe9, 0x81, 0x95, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x25, 0xf5, 0x41, 0x2c, 0x88, - 0x3a, 0xa5, 0x39, 0x4c, 0x5c, 0x5c, 0x2e, 0x70, 0xcd, 0x42, 0xb2, 
0x5c, 0x5c, 0xb9, 0xa9, 0x29, - 0x99, 0x89, 0xf1, 0x20, 0x3d, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x9c, 0x60, 0x91, 0x90, - 0xca, 0x82, 0x54, 0x21, 0x2f, 0x2e, 0xb6, 0x94, 0xcc, 0xf4, 0xd4, 0xe2, 0x12, 0x09, 0x26, 0x90, - 0x94, 0x93, 0xd1, 0x89, 0x7b, 0xf2, 0x0c, 0xb7, 0xee, 0xc9, 0x6b, 0x21, 0x39, 0x35, 0xbf, 0x20, - 0x35, 0x0f, 0x6e, 0x79, 0xb1, 0x7e, 0x7a, 0xbe, 0x2e, 0x44, 0x8b, 0x9e, 0x0b, 0x98, 0x0a, 0x82, - 0x9a, 0x20, 0x24, 0xc4, 0xc5, 0x52, 0x9c, 0x59, 0x95, 0x2a, 0xc1, 0xac, 0xc0, 0xa8, 0xc1, 0x1c, - 0x04, 0x66, 0x0b, 0xf9, 0x73, 0x71, 0x27, 0xe6, 0xe5, 0xe5, 0x97, 0x24, 0x96, 0x64, 0xe6, 0xe7, - 0x15, 0x4b, 0xb0, 0x2a, 0x30, 0x6b, 0x70, 0x1b, 0xe9, 0xea, 0xa1, 0xfb, 0x45, 0x0f, 0xe1, 0x62, - 0x3d, 0x47, 0x84, 0x7a, 0xd7, 0xbc, 0x92, 0xa2, 0xca, 0x20, 0x64, 0x13, 0xa4, 0xec, 0xb8, 0x04, - 0xd0, 0x15, 0x08, 0x09, 0x70, 0x31, 0x67, 0xa7, 0x56, 0x42, 0x3d, 0x07, 0x62, 0x0a, 0x89, 0x70, - 0xb1, 0x96, 0x25, 0xe6, 0x94, 0xa6, 0x42, 0x7c, 0x15, 0x04, 0xe1, 0x58, 0x31, 0x59, 0x30, 0x3a, - 0x79, 0x9d, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0x43, 0xc3, 0x23, 0x39, 0xc6, 0x13, 0x8f, - 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x31, 0xca, 0x80, 0xf8, 0xd8, 0xb1, - 0x06, 0x93, 0x11, 0x0c, 0x49, 0x6c, 0xe0, 0x30, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x22, - 0x8a, 0x20, 0x4a, 0xda, 0x01, 0x00, 0x00, -} +func (*Descriptor) ProtoMessage() {} -func (m *Descriptor) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *Descriptor) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_descriptor_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *Descriptor) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use Descriptor.ProtoReflect.Descriptor instead. 
+func (*Descriptor) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_descriptor_proto_rawDescGZIP(), []int{0} } -func (m *Descriptor) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) +func (x *Descriptor) GetMediaType() string { + if x != nil { + return x.MediaType } - if len(m.Annotations) > 0 { - for k := range m.Annotations { - v := m.Annotations[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintDescriptor(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintDescriptor(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintDescriptor(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x2a - } - } - if m.Size_ != 0 { - i = encodeVarintDescriptor(dAtA, i, uint64(m.Size_)) - i-- - dAtA[i] = 0x18 - } - if len(m.Digest) > 0 { - i -= len(m.Digest) - copy(dAtA[i:], m.Digest) - i = encodeVarintDescriptor(dAtA, i, uint64(len(m.Digest))) - i-- - dAtA[i] = 0x12 - } - if len(m.MediaType) > 0 { - i -= len(m.MediaType) - copy(dAtA[i:], m.MediaType) - i = encodeVarintDescriptor(dAtA, i, uint64(len(m.MediaType))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return "" } -func encodeVarintDescriptor(dAtA []byte, offset int, v uint64) int { - offset -= sovDescriptor(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Descriptor) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.MediaType) - if l > 0 { - n += 1 + l + sovDescriptor(uint64(l)) - } - l = len(m.Digest) - if l > 0 { - n += 1 + l + sovDescriptor(uint64(l)) - } - if m.Size_ != 0 { - n += 1 + sovDescriptor(uint64(m.Size_)) - } - if len(m.Annotations) > 0 { - for k, v := range m.Annotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovDescriptor(uint64(len(k))) + 1 + len(v) + sovDescriptor(uint64(len(v))) - n += mapEntrySize + 1 + sovDescriptor(uint64(mapEntrySize)) - } +func (x *Descriptor) GetDigest() string { + if x != nil { + return x.Digest } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n + return "" } -func sovDescriptor(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozDescriptor(x uint64) (n int) { - return sovDescriptor(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Descriptor) String() string { - if this == nil { - return "nil" +func (x *Descriptor) GetSize() int64 { + if x != nil { + return x.Size } - keysForAnnotations := make([]string, 0, len(this.Annotations)) - for k, _ := range this.Annotations { - keysForAnnotations = append(keysForAnnotations, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) - mapStringForAnnotations := "map[string]string{" - for _, k := range keysForAnnotations { - mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) - } - mapStringForAnnotations += "}" - s := strings.Join([]string{`&Descriptor{`, - `MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`, - `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, - `Size_:` + fmt.Sprintf("%v", this.Size_) + `,`, - `Annotations:` + mapStringForAnnotations + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s + return 0 } -func valueToStringDescriptor(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() 
{ - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Descriptor) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDescriptor - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Descriptor: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Descriptor: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDescriptor - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthDescriptor - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthDescriptor - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MediaType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDescriptor - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthDescriptor - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthDescriptor - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) - } - m.Size_ = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDescriptor - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Size_ |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDescriptor - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDescriptor - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDescriptor - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Annotations == nil { - m.Annotations = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDescriptor - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 
0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDescriptor - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthDescriptor - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthDescriptor - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDescriptor - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthDescriptor - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthDescriptor - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipDescriptor(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDescriptor - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Annotations[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDescriptor(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDescriptor - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *Descriptor) GetAnnotations() map[string]string { + if x != nil { + return x.Annotations } return nil } -func skipDescriptor(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDescriptor - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDescriptor - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDescriptor - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthDescriptor - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupDescriptor - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthDescriptor - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF + +var File_github_com_containerd_containerd_api_types_descriptor_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_types_descriptor_proto_rawDesc = []byte{ + 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, + 0xe8, 0x01, 0x0a, 0x0a, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x1d, + 0x0a, 0x0a, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, + 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, + 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x4f, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x41, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 
0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, + 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, + 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( - ErrInvalidLengthDescriptor = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowDescriptor = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupDescriptor = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_types_descriptor_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_types_descriptor_proto_rawDescData = file_github_com_containerd_containerd_api_types_descriptor_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_types_descriptor_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_types_descriptor_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_types_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_descriptor_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_types_descriptor_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_types_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_github_com_containerd_containerd_api_types_descriptor_proto_goTypes = []interface{}{ + (*Descriptor)(nil), // 0: containerd.types.Descriptor + nil, // 1: containerd.types.Descriptor.AnnotationsEntry +} +var file_github_com_containerd_containerd_api_types_descriptor_proto_depIdxs = []int32{ + 1, // 0: containerd.types.Descriptor.annotations:type_name -> containerd.types.Descriptor.AnnotationsEntry + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_types_descriptor_proto_init() } +func file_github_com_containerd_containerd_api_types_descriptor_proto_init() { + if File_github_com_containerd_containerd_api_types_descriptor_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_types_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Descriptor); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_types_descriptor_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_types_descriptor_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_types_descriptor_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_types_descriptor_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_types_descriptor_proto = out.File + 
file_github_com_containerd_containerd_api_types_descriptor_proto_rawDesc = nil + file_github_com_containerd_containerd_api_types_descriptor_proto_goTypes = nil + file_github_com_containerd_containerd_api_types_descriptor_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/types/descriptor.proto b/vendor/github.com/containerd/containerd/api/types/descriptor.proto index a841d1bb22d44..faaf416dd10cc 100644 --- a/vendor/github.com/containerd/containerd/api/types/descriptor.proto +++ b/vendor/github.com/containerd/containerd/api/types/descriptor.proto @@ -18,8 +18,6 @@ syntax = "proto3"; package containerd.types; -import weak "gogoproto/gogo.proto"; - option go_package = "github.com/containerd/containerd/api/types;types"; // Descriptor describes a blob in a content store. @@ -29,7 +27,7 @@ option go_package = "github.com/containerd/containerd/api/types;types"; // See https://godoc.org/github.com/opencontainers/image-spec/specs-go/v1#Descriptor message Descriptor { string media_type = 1; - string digest = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + string digest = 2; int64 size = 3; map<string, string> annotations = 5; } diff --git a/vendor/github.com/containerd/containerd/api/types/metrics.pb.go b/vendor/github.com/containerd/containerd/api/types/metrics.pb.go index 75773e442ab74..b18ce1c5b6099 100644 --- a/vendor/github.com/containerd/containerd/api/types/metrics.pb.go +++ b/vendor/github.com/containerd/containerd/api/types/metrics.pb.go @@ -1,450 +1,194 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/types/metrics.proto package types import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types "github.com/gogo/protobuf/types" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - strings "strings" - time "time" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type Metric struct { - Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,proto3,stdtime" json:"timestamp"` - ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` - Data *types.Any `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Metric) Reset() { *m = Metric{} } -func (*Metric) ProtoMessage() {} -func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_8d594d87edf6e6bc, []int{0} -} -func (m *Metric) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Metric.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Metric) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metric.Merge(m, src) -} -func (m *Metric) XXX_Size() int { - return m.Size() + Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Data *anypb.Any `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` } -func (m *Metric) XXX_DiscardUnknown() { - xxx_messageInfo_Metric.DiscardUnknown(m) -} - -var xxx_messageInfo_Metric proto.InternalMessageInfo -func init() { - proto.RegisterType((*Metric)(nil), "containerd.types.Metric") +func (x *Metric) Reset() { + *x = Metric{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_metrics_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/types/metrics.proto", fileDescriptor_8d594d87edf6e6bc) +func (x *Metric) String() string { + return protoimpl.X.MessageStringOf(x) } -var fileDescriptor_8d594d87edf6e6bc = []byte{ - // 258 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x48, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, - 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xa6, 0x96, - 0x14, 0x65, 0x26, 0x17, 0xeb, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0xd4, 0xe8, 0x81, - 0xe5, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x94, 0x64, - 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x3e, 0x98, 0x97, 0x54, 0x9a, 0xa6, 0x9f, 0x98, 0x57, 0x09, - 0x95, 0x92, 0x47, 0x97, 0x2a, 0xc9, 0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x80, 0x28, 0x50, - 0xea, 0x63, 0xe4, 0x62, 0xf3, 0x05, 0xdb, 0x2a, 0xe4, 0xc4, 0xc5, 0x09, 0x97, 0x95, 0x60, 0x54, - 0x60, 0xd4, 0xe0, 0x36, 0x92, 0xd2, 0x83, 0xe8, 0xd7, 0x83, 0xe9, 0xd7, 0x0b, 0x81, 0xa9, 0x70, - 0xe2, 0x38, 0x71, 0x4f, 0x9e, 0x61, 0xc2, 0x7d, 0x79, 0xc6, 0x20, 0x84, 0x36, 0x21, 0x31, 0x2e, - 0xa6, 0xcc, 0x14, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x4e, 0x27, 0xb6, 0x47, 0xf7, 0xe4, 0x99, 0x3c, - 0x5d, 0x82, 0x98, 0x32, 0x53, 0x84, 0x34, 0xb8, 0x58, 0x52, 0x12, 0x4b, 
0x12, 0x25, 0x98, 0xc1, - 0xc6, 0x8a, 0x60, 0x18, 0xeb, 0x98, 0x57, 0x19, 0x04, 0x56, 0xe1, 0xe4, 0x75, 0xe2, 0xa1, 0x1c, - 0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x0d, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, - 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x28, 0x03, 0xe2, 0x03, 0xd2, 0x1a, 0x4c, 0x46, 0x30, 0x24, - 0xb1, 0x81, 0x6d, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xde, 0x0d, 0x02, 0xfe, 0x85, 0x01, - 0x00, 0x00, -} +func (*Metric) ProtoMessage() {} -func (m *Metric) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *Metric) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_metrics_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *Metric) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use Metric.ProtoReflect.Descriptor instead. +func (*Metric) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_metrics_proto_rawDescGZIP(), []int{0} } -func (m *Metric) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Data != nil { - { - size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a +func (x *Metric) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintMetrics(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0x12 - } - n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err2 != nil { - return 0, err2 - } - i -= n2 - i = encodeVarintMetrics(dAtA, i, uint64(n2)) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return nil } -func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int { - offset -= sovMetrics(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Metric) Size() (n int) { - if m == nil { - return 0 +func (x *Metric) GetID() string { + if x != nil { + return x.ID } - var l int - _ = l - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) - n += 1 + l + sovMetrics(uint64(l)) - l = len(m.ID) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - if m.Data != nil { - l = m.Data.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n + return "" } -func sovMetrics(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozMetrics(x uint64) (n int) { - return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Metric) String() string { - if this == nil { - return "nil" +func (x *Metric) GetData() *anypb.Any { + if x != nil { + return x.Data } - s := strings.Join([]string{`&Metric{`, - `Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - 
`ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Data:` + strings.Replace(fmt.Sprintf("%v", this.Data), "Any", "types.Any", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringMetrics(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } -func (m *Metric) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Metric: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Data == nil { - m.Data = &types.Any{} - } - if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil +var File_github_com_containerd_containerd_api_types_metrics_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_types_metrics_proto_rawDesc = []byte{ + 0x0a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x1a, 0x19, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x7c, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, + 0x69, 0x63, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x28, 0x0a, 0x04, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, + 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } -func skipMetrics(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthMetrics - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupMetrics + +var ( + file_github_com_containerd_containerd_api_types_metrics_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_types_metrics_proto_rawDescData = 
file_github_com_containerd_containerd_api_types_metrics_proto_rawDesc +) + +func file_github_com_containerd_containerd_api_types_metrics_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_types_metrics_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_types_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_metrics_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_types_metrics_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_types_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_github_com_containerd_containerd_api_types_metrics_proto_goTypes = []interface{}{ + (*Metric)(nil), // 0: containerd.types.Metric + (*timestamppb.Timestamp)(nil), // 1: google.protobuf.Timestamp + (*anypb.Any)(nil), // 2: google.protobuf.Any +} +var file_github_com_containerd_containerd_api_types_metrics_proto_depIdxs = []int32{ + 1, // 0: containerd.types.Metric.timestamp:type_name -> google.protobuf.Timestamp + 2, // 1: containerd.types.Metric.data:type_name -> google.protobuf.Any + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_types_metrics_proto_init() } +func file_github_com_containerd_containerd_api_types_metrics_proto_init() { + if File_github_com_containerd_containerd_api_types_metrics_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_types_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metric); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthMetrics - } - if depth == 0 { - return iNdEx, nil } } - return 0, io.ErrUnexpectedEOF + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_types_metrics_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_types_metrics_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_types_metrics_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_types_metrics_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_types_metrics_proto = out.File + file_github_com_containerd_containerd_api_types_metrics_proto_rawDesc = nil + file_github_com_containerd_containerd_api_types_metrics_proto_goTypes = nil + file_github_com_containerd_containerd_api_types_metrics_proto_depIdxs = nil } - -var ( - ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupMetrics = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/containerd/containerd/api/types/metrics.proto b/vendor/github.com/containerd/containerd/api/types/metrics.proto index b8bc673267fc5..3e6a7751e37ca 100644 --- 
a/vendor/github.com/containerd/containerd/api/types/metrics.proto +++ b/vendor/github.com/containerd/containerd/api/types/metrics.proto @@ -18,14 +18,13 @@ syntax = "proto3"; package containerd.types; -import weak "gogoproto/gogo.proto"; import "google/protobuf/any.proto"; import "google/protobuf/timestamp.proto"; option go_package = "github.com/containerd/containerd/api/types;types"; message Metric { - google.protobuf.Timestamp timestamp = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp timestamp = 1; string id = 2; google.protobuf.Any data = 3; } diff --git a/vendor/github.com/containerd/containerd/api/types/mount.pb.go b/vendor/github.com/containerd/containerd/api/types/mount.pb.go index d0a0bee761f03..ff77a7d7bd228 100644 --- a/vendor/github.com/containerd/containerd/api/types/mount.pb.go +++ b/vendor/github.com/containerd/containerd/api/types/mount.pb.go @@ -1,28 +1,39 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/types/mount.proto package types import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - strings "strings" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // Mount describes mounts for a container. // @@ -32,6 +43,10 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // The Mount type follows the structure of the mount syscall, including a type, // source, target and options. type Mount struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Type defines the nature of the mount. Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` // Source specifies the name of the mount. 
Depending on mount type, this @@ -40,455 +55,148 @@ type Mount struct { // Target path in container Target string `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` // Options specifies zero or more fstab style mount options. - Options []string `protobuf:"bytes,4,rep,name=options,proto3" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Options []string `protobuf:"bytes,4,rep,name=options,proto3" json:"options,omitempty"` } -func (m *Mount) Reset() { *m = Mount{} } -func (*Mount) ProtoMessage() {} -func (*Mount) Descriptor() ([]byte, []int) { - return fileDescriptor_920196890d4a7b9f, []int{0} -} -func (m *Mount) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Mount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Mount.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *Mount) Reset() { + *x = Mount{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_mount_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *Mount) XXX_Merge(src proto.Message) { - xxx_messageInfo_Mount.Merge(m, src) -} -func (m *Mount) XXX_Size() int { - return m.Size() -} -func (m *Mount) XXX_DiscardUnknown() { - xxx_messageInfo_Mount.DiscardUnknown(m) -} - -var xxx_messageInfo_Mount proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Mount)(nil), "containerd.types.Mount") -} -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/types/mount.proto", fileDescriptor_920196890d4a7b9f) +func (x *Mount) String() string { + return protoimpl.X.MessageStringOf(x) } -var fileDescriptor_920196890d4a7b9f = []byte{ - // 202 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4b, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, - 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xe6, 0x97, - 0xe6, 0x95, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0x54, 0xe8, 0x81, 0x65, 0xa5, - 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x52, 0x2a, 0x17, 0xab, - 0x2f, 0x48, 0x9b, 0x90, 0x10, 0x17, 0x0b, 0x48, 0x9d, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, - 0x98, 0x2d, 0x24, 0xc6, 0xc5, 0x56, 0x9c, 0x5f, 0x5a, 0x94, 0x9c, 0x2a, 0xc1, 0x04, 0x16, 0x85, - 0xf2, 0x40, 0xe2, 0x25, 0x89, 0x45, 0xe9, 0xa9, 0x25, 0x12, 0xcc, 0x10, 0x71, 0x08, 0x4f, 0x48, - 0x82, 0x8b, 0x3d, 0xbf, 0xa0, 0x24, 0x33, 0x3f, 0xaf, 0x58, 0x82, 0x45, 0x81, 0x59, 0x83, 0x33, - 0x08, 0xc6, 0x75, 0xf2, 0x3a, 0xf1, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72, - 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x01, - 0xf1, 0x1e, 0xb4, 0x06, 0x93, 0x11, 0x0c, 0x49, 0x6c, 0x60, 0xb7, 0x1b, 0x03, 0x02, 0x00, 0x00, - 0xff, 0xff, 0x82, 0x1c, 0x02, 0x18, 0x1d, 0x01, 0x00, 0x00, -} +func (*Mount) ProtoMessage() {} -func (m *Mount) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *Mount) ProtoReflect() protoreflect.Message { + mi := 
&file_github_com_containerd_containerd_api_types_mount_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *Mount) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use Mount.ProtoReflect.Descriptor instead. +func (*Mount) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_mount_proto_rawDescGZIP(), []int{0} } -func (m *Mount) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) +func (x *Mount) GetType() string { + if x != nil { + return x.Type } - if len(m.Options) > 0 { - for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Options[iNdEx]) - copy(dAtA[i:], m.Options[iNdEx]) - i = encodeVarintMount(dAtA, i, uint64(len(m.Options[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } - if len(m.Target) > 0 { - i -= len(m.Target) - copy(dAtA[i:], m.Target) - i = encodeVarintMount(dAtA, i, uint64(len(m.Target))) - i-- - dAtA[i] = 0x1a - } - if len(m.Source) > 0 { - i -= len(m.Source) - copy(dAtA[i:], m.Source) - i = encodeVarintMount(dAtA, i, uint64(len(m.Source))) - i-- - dAtA[i] = 0x12 - } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintMount(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return "" } -func encodeVarintMount(dAtA []byte, offset int, v uint64) int { - offset -= sovMount(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Mount) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Type) - if l > 0 { - n += 1 + l + sovMount(uint64(l)) - } - l = len(m.Source) - if l > 0 { - n += 1 + l + sovMount(uint64(l)) - } - l = len(m.Target) - if l > 0 { - n += 1 + l + sovMount(uint64(l)) - } - if len(m.Options) > 0 { - for _, s := range m.Options { - l = len(s) - n += 1 + l + sovMount(uint64(l)) - } +func (x *Mount) GetSource() string { + if x != nil { + return x.Source } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n + return "" } -func sovMount(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozMount(x uint64) (n int) { - return sovMount(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Mount) String() string { - if this == nil { - return "nil" +func (x *Mount) GetTarget() string { + if x != nil { + return x.Target } - s := strings.Join([]string{`&Mount{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Source:` + fmt.Sprintf("%v", this.Source) + `,`, - `Target:` + fmt.Sprintf("%v", this.Target) + `,`, - `Options:` + fmt.Sprintf("%v", this.Options) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s + return "" } -func valueToStringMount(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Mount) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMount - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Mount: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Mount: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMount - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMount - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMount - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMount - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMount - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMount - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Source = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMount - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMount - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMount - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Target = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMount - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMount - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMount - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Options = append(m.Options, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMount(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMount - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *Mount) GetOptions() []string { + if x != nil { + return x.Options } return nil } -func skipMount(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMount - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMount - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMount - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthMount - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupMount - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthMount - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF + +var File_github_com_containerd_containerd_api_types_mount_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_types_mount_proto_rawDesc = []byte{ + 0x0a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6d, 0x6f, 0x75, + 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, 0x65, 0x0a, 0x05, 0x4d, 0x6f, + 0x75, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, + 0x16, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( - ErrInvalidLengthMount = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowMount = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupMount = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_api_types_mount_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_types_mount_proto_rawDescData = 
file_github_com_containerd_containerd_api_types_mount_proto_rawDesc ) + +func file_github_com_containerd_containerd_api_types_mount_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_types_mount_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_types_mount_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_mount_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_types_mount_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_types_mount_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_github_com_containerd_containerd_api_types_mount_proto_goTypes = []interface{}{ + (*Mount)(nil), // 0: containerd.types.Mount +} +var file_github_com_containerd_containerd_api_types_mount_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_types_mount_proto_init() } +func file_github_com_containerd_containerd_api_types_mount_proto_init() { + if File_github_com_containerd_containerd_api_types_mount_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_types_mount_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Mount); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_types_mount_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_types_mount_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_types_mount_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_types_mount_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_types_mount_proto = out.File + file_github_com_containerd_containerd_api_types_mount_proto_rawDesc = nil + file_github_com_containerd_containerd_api_types_mount_proto_goTypes = nil + file_github_com_containerd_containerd_api_types_mount_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/types/mount.proto b/vendor/github.com/containerd/containerd/api/types/mount.proto index 41ab13313871e..54e0a0cddfa40 100644 --- a/vendor/github.com/containerd/containerd/api/types/mount.proto +++ b/vendor/github.com/containerd/containerd/api/types/mount.proto @@ -18,8 +18,6 @@ syntax = "proto3"; package containerd.types; -import weak "gogoproto/gogo.proto"; - option go_package = "github.com/containerd/containerd/api/types;types"; // Mount describes mounts for a container. diff --git a/vendor/github.com/containerd/containerd/api/types/platform.pb.go b/vendor/github.com/containerd/containerd/api/types/platform.pb.go index a0f78c8a769d6..3e206cbafbdac 100644 --- a/vendor/github.com/containerd/containerd/api/types/platform.pb.go +++ b/vendor/github.com/containerd/containerd/api/types/platform.pb.go @@ -1,435 +1,184 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. 
+// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/types/platform.proto package types import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - strings "strings" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) // Platform follows the structure of the OCI platform specification, from // descriptors. 
type Platform struct { - OS string `protobuf:"bytes,1,opt,name=os,proto3" json:"os,omitempty"` - Architecture string `protobuf:"bytes,2,opt,name=architecture,proto3" json:"architecture,omitempty"` - Variant string `protobuf:"bytes,3,opt,name=variant,proto3" json:"variant,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *Platform) Reset() { *m = Platform{} } -func (*Platform) ProtoMessage() {} -func (*Platform) Descriptor() ([]byte, []int) { - return fileDescriptor_24ba7a4b83e2367e, []int{0} -} -func (m *Platform) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Platform) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Platform.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Platform) XXX_Merge(src proto.Message) { - xxx_messageInfo_Platform.Merge(m, src) + OS string `protobuf:"bytes,1,opt,name=os,proto3" json:"os,omitempty"` + Architecture string `protobuf:"bytes,2,opt,name=architecture,proto3" json:"architecture,omitempty"` + Variant string `protobuf:"bytes,3,opt,name=variant,proto3" json:"variant,omitempty"` } -func (m *Platform) XXX_Size() int { - return m.Size() -} -func (m *Platform) XXX_DiscardUnknown() { - xxx_messageInfo_Platform.DiscardUnknown(m) -} - -var xxx_messageInfo_Platform proto.InternalMessageInfo -func init() { - proto.RegisterType((*Platform)(nil), "containerd.types.Platform") +func (x *Platform) Reset() { + *x = Platform{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_platform_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/types/platform.proto", fileDescriptor_24ba7a4b83e2367e) +func (x *Platform) String() string { + return protoimpl.X.MessageStringOf(x) } -var fileDescriptor_24ba7a4b83e2367e = []byte{ - // 205 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, - 0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0x17, 0xe4, 0x24, - 0x96, 0xa4, 0xe5, 0x17, 0xe5, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0x14, 0xe9, - 0x81, 0x15, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x25, 0xf5, 0x41, 0x2c, 0x88, 0x3a, 0xa5, - 0x04, 0x2e, 0x8e, 0x00, 0xa8, 0x4e, 0x21, 0x31, 0x2e, 0xa6, 0xfc, 0x62, 0x09, 0x46, 0x05, 0x46, - 0x0d, 0x4e, 0x27, 0xb6, 0x47, 0xf7, 0xe4, 0x99, 0xfc, 0x83, 0x83, 0x98, 0xf2, 0x8b, 0x85, 0x94, - 0xb8, 0x78, 0x12, 0x8b, 0x92, 0x33, 0x32, 0x4b, 0x52, 0x93, 0x4b, 0x4a, 0x8b, 0x52, 0x25, 0x98, - 0x40, 0x2a, 0x82, 0x50, 0xc4, 0x84, 0x24, 0xb8, 0xd8, 0xcb, 0x12, 0x8b, 0x32, 0x13, 0xf3, 0x4a, - 0x24, 0x98, 0xc1, 0xd2, 0x30, 0xae, 0x93, 0xd7, 0x89, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, - 0x34, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, - 0x18, 0xa3, 0x0c, 0x88, 0xf7, 0x9e, 0x35, 0x98, 0x8c, 0x60, 0x48, 0x62, 0x03, 0x3b, 0xdb, 0x18, - 0x10, 0x00, 0x00, 0xff, 0xff, 0x05, 0xaa, 0xda, 0xa1, 0x1b, 0x01, 0x00, 0x00, -} +func (*Platform) 
ProtoMessage() {} -func (m *Platform) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *Platform) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_platform_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return dAtA[:n], nil + return mi.MessageOf(x) } -func (m *Platform) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +// Deprecated: Use Platform.ProtoReflect.Descriptor instead. +func (*Platform) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_platform_proto_rawDescGZIP(), []int{0} } -func (m *Platform) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Variant) > 0 { - i -= len(m.Variant) - copy(dAtA[i:], m.Variant) - i = encodeVarintPlatform(dAtA, i, uint64(len(m.Variant))) - i-- - dAtA[i] = 0x1a +func (x *Platform) GetOS() string { + if x != nil { + return x.OS } - if len(m.Architecture) > 0 { - i -= len(m.Architecture) - copy(dAtA[i:], m.Architecture) - i = encodeVarintPlatform(dAtA, i, uint64(len(m.Architecture))) - i-- - dAtA[i] = 0x12 - } - if len(m.OS) > 0 { - i -= len(m.OS) - copy(dAtA[i:], m.OS) - i = encodeVarintPlatform(dAtA, i, uint64(len(m.OS))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return "" } -func encodeVarintPlatform(dAtA []byte, offset int, v uint64) int { - offset -= sovPlatform(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Platform) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.OS) - if l > 0 { - n += 1 + l + sovPlatform(uint64(l)) +func (x *Platform) GetArchitecture() string { + if x != nil { + return x.Architecture } - l = len(m.Architecture) - if l > 0 { - n += 1 + l + sovPlatform(uint64(l)) - } - l = len(m.Variant) - if l > 0 { - n += 1 + l + sovPlatform(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n + return "" } -func sovPlatform(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozPlatform(x uint64) (n int) { - return sovPlatform(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Platform) String() string { - if this == nil { - return "nil" +func (x *Platform) GetVariant() string { + if x != nil { + return x.Variant } - s := strings.Join([]string{`&Platform{`, - `OS:` + fmt.Sprintf("%v", this.OS) + `,`, - `Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`, - `Variant:` + fmt.Sprintf("%v", this.Variant) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s + return "" } -func valueToStringPlatform(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Platform) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlatform - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Platform: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Platform: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OS", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlatform - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlatform - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlatform - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OS = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlatform - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlatform - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlatform - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Architecture = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Variant", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlatform - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlatform - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthPlatform - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Variant = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPlatform(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthPlatform - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil +var File_github_com_containerd_containerd_api_types_platform_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_types_platform_proto_rawDesc = []byte{ + 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x6c, 0x61, + 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x22, 0x58, 0x0a, + 0x08, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x6f, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x72, 0x63, + 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x61, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x76, 0x61, 0x72, 0x69, 0x61, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x76, 0x61, 0x72, 0x69, 0x61, 0x6e, 0x74, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } -func skipPlatform(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlatform - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlatform - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlatform - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthPlatform - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupPlatform + +var ( + file_github_com_containerd_containerd_api_types_platform_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_types_platform_proto_rawDescData = file_github_com_containerd_containerd_api_types_platform_proto_rawDesc +) + +func file_github_com_containerd_containerd_api_types_platform_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_types_platform_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_types_platform_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_platform_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_types_platform_proto_rawDescData +} + +var 
file_github_com_containerd_containerd_api_types_platform_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_github_com_containerd_containerd_api_types_platform_proto_goTypes = []interface{}{ + (*Platform)(nil), // 0: containerd.types.Platform +} +var file_github_com_containerd_containerd_api_types_platform_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_types_platform_proto_init() } +func file_github_com_containerd_containerd_api_types_platform_proto_init() { + if File_github_com_containerd_containerd_api_types_platform_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_types_platform_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Platform); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthPlatform - } - if depth == 0 { - return iNdEx, nil } } - return 0, io.ErrUnexpectedEOF + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_types_platform_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_types_platform_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_types_platform_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_types_platform_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_types_platform_proto = out.File + file_github_com_containerd_containerd_api_types_platform_proto_rawDesc = nil + file_github_com_containerd_containerd_api_types_platform_proto_goTypes = nil + file_github_com_containerd_containerd_api_types_platform_proto_depIdxs = nil } - -var ( - ErrInvalidLengthPlatform = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowPlatform = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupPlatform = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/containerd/containerd/api/types/platform.proto b/vendor/github.com/containerd/containerd/api/types/platform.proto index 7813606841104..b6088251f0074 100644 --- a/vendor/github.com/containerd/containerd/api/types/platform.proto +++ b/vendor/github.com/containerd/containerd/api/types/platform.proto @@ -18,14 +18,12 @@ syntax = "proto3"; package containerd.types; -import weak "gogoproto/gogo.proto"; - option go_package = "github.com/containerd/containerd/api/types;types"; // Platform follows the structure of the OCI platform specification, from // descriptors. 
message Platform { - string os = 1 [(gogoproto.customname) = "OS"]; + string os = 1; string architecture = 2; string variant = 3; } diff --git a/vendor/github.com/containerd/containerd/api/types/sandbox.pb.go b/vendor/github.com/containerd/containerd/api/types/sandbox.pb.go new file mode 100644 index 0000000000000..67594f416c81d --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/sandbox.pb.go @@ -0,0 +1,346 @@ +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 +// source: github.com/containerd/containerd/api/types/sandbox.proto + +package types + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Sandbox represents a sandbox metadata object that keeps all info required by controller to +// work with a particular instance. +type Sandbox struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // SandboxID is a unique instance identifier within namespace + SandboxID string `protobuf:"bytes,1,opt,name=sandbox_id,json=sandboxId,proto3" json:"sandbox_id,omitempty"` + // Runtime specifies which runtime to use for executing this container. + Runtime *Sandbox_Runtime `protobuf:"bytes,2,opt,name=runtime,proto3" json:"runtime,omitempty"` + // Spec is sandbox configuration (kin of OCI runtime spec), spec's data will be written to a config.json file in the + // bundle directory (similary to OCI spec). + Spec *anypb.Any `protobuf:"bytes,3,opt,name=spec,proto3" json:"spec,omitempty"` + // Labels provides an area to include arbitrary data on containers. + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // CreatedAt is the time the container was first created. + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + // UpdatedAt is the last time the container was mutated. + UpdatedAt *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` + // Extensions allow clients to provide optional blobs that can be handled by runtime. 
+ Extensions map[string]*anypb.Any `protobuf:"bytes,7,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Sandbox) Reset() { + *x = Sandbox{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_sandbox_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Sandbox) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Sandbox) ProtoMessage() {} + +func (x *Sandbox) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_sandbox_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Sandbox.ProtoReflect.Descriptor instead. +func (*Sandbox) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_sandbox_proto_rawDescGZIP(), []int{0} +} + +func (x *Sandbox) GetSandboxID() string { + if x != nil { + return x.SandboxID + } + return "" +} + +func (x *Sandbox) GetRuntime() *Sandbox_Runtime { + if x != nil { + return x.Runtime + } + return nil +} + +func (x *Sandbox) GetSpec() *anypb.Any { + if x != nil { + return x.Spec + } + return nil +} + +func (x *Sandbox) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *Sandbox) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil +} + +func (x *Sandbox) GetUpdatedAt() *timestamppb.Timestamp { + if x != nil { + return x.UpdatedAt + } + return nil +} + +func (x *Sandbox) GetExtensions() map[string]*anypb.Any { + if x != nil { + return x.Extensions + } + return nil +} + +type Sandbox_Runtime struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Name is the name of the runtime. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Options specify additional runtime initialization options for the shim (this data will be available in StartShim). + // Typically this data expected to be runtime shim implementation specific. + Options *anypb.Any `protobuf:"bytes,2,opt,name=options,proto3" json:"options,omitempty"` +} + +func (x *Sandbox_Runtime) Reset() { + *x = Sandbox_Runtime{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_sandbox_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Sandbox_Runtime) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Sandbox_Runtime) ProtoMessage() {} + +func (x *Sandbox_Runtime) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_sandbox_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Sandbox_Runtime.ProtoReflect.Descriptor instead. 
+func (*Sandbox_Runtime) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_sandbox_proto_rawDescGZIP(), []int{0, 0} +} + +func (x *Sandbox_Runtime) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Sandbox_Runtime) GetOptions() *anypb.Any { + if x != nil { + return x.Options + } + return nil +} + +var File_github_com_containerd_containerd_api_types_sandbox_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_types_sandbox_proto_rawDesc = []byte{ + 0x0a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x73, 0x61, 0x6e, + 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x1a, 0x19, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xee, 0x04, 0x0a, 0x07, 0x53, 0x61, 0x6e, + 0x64, 0x62, 0x6f, 0x78, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x61, 0x6e, 0x64, 0x62, 0x6f, + 0x78, 0x49, 0x64, 0x12, 0x3b, 0x0a, 0x07, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, + 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x52, 0x07, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, + 0x12, 0x28, 0x0a, 0x04, 0x73, 0x70, 0x65, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x73, 0x70, 0x65, 0x63, 0x12, 0x3d, 0x0a, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x61, + 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x64, 0x41, 0x74, 0x12, 0x39, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, + 0x49, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 
0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x61, 0x6e, 0x64, 0x62, 0x6f, 0x78, 0x2e, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x4d, 0x0a, 0x07, 0x52, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, + 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x53, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_github_com_containerd_containerd_api_types_sandbox_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_types_sandbox_proto_rawDescData = file_github_com_containerd_containerd_api_types_sandbox_proto_rawDesc +) + +func file_github_com_containerd_containerd_api_types_sandbox_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_types_sandbox_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_types_sandbox_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_sandbox_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_types_sandbox_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_types_sandbox_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_github_com_containerd_containerd_api_types_sandbox_proto_goTypes = []interface{}{ + (*Sandbox)(nil), // 0: containerd.types.Sandbox + (*Sandbox_Runtime)(nil), // 1: containerd.types.Sandbox.Runtime + nil, // 2: containerd.types.Sandbox.LabelsEntry + nil, // 3: containerd.types.Sandbox.ExtensionsEntry + (*anypb.Any)(nil), // 4: google.protobuf.Any + (*timestamppb.Timestamp)(nil), // 5: google.protobuf.Timestamp +} +var file_github_com_containerd_containerd_api_types_sandbox_proto_depIdxs = []int32{ + 1, // 0: containerd.types.Sandbox.runtime:type_name -> containerd.types.Sandbox.Runtime + 4, // 1: containerd.types.Sandbox.spec:type_name -> google.protobuf.Any + 2, // 2: 
containerd.types.Sandbox.labels:type_name -> containerd.types.Sandbox.LabelsEntry + 5, // 3: containerd.types.Sandbox.created_at:type_name -> google.protobuf.Timestamp + 5, // 4: containerd.types.Sandbox.updated_at:type_name -> google.protobuf.Timestamp + 3, // 5: containerd.types.Sandbox.extensions:type_name -> containerd.types.Sandbox.ExtensionsEntry + 4, // 6: containerd.types.Sandbox.Runtime.options:type_name -> google.protobuf.Any + 4, // 7: containerd.types.Sandbox.ExtensionsEntry.value:type_name -> google.protobuf.Any + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_types_sandbox_proto_init() } +func file_github_com_containerd_containerd_api_types_sandbox_proto_init() { + if File_github_com_containerd_containerd_api_types_sandbox_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_types_sandbox_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Sandbox); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_types_sandbox_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Sandbox_Runtime); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_types_sandbox_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_types_sandbox_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_types_sandbox_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_types_sandbox_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_types_sandbox_proto = out.File + file_github_com_containerd_containerd_api_types_sandbox_proto_rawDesc = nil + file_github_com_containerd_containerd_api_types_sandbox_proto_goTypes = nil + file_github_com_containerd_containerd_api_types_sandbox_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/types/sandbox.proto b/vendor/github.com/containerd/containerd/api/types/sandbox.proto new file mode 100644 index 0000000000000..b607706194b23 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/sandbox.proto @@ -0,0 +1,51 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +syntax = "proto3"; + +package containerd.types; + +import "google/protobuf/any.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "github.com/containerd/containerd/api/types;types"; + +// Sandbox represents a sandbox metadata object that keeps all info required by controller to +// work with a particular instance. +message Sandbox { + // SandboxID is a unique instance identifier within namespace + string sandbox_id = 1; + message Runtime { + // Name is the name of the runtime. + string name = 1; + // Options specify additional runtime initialization options for the shim (this data will be available in StartShim). + // Typically this data expected to be runtime shim implementation specific. + google.protobuf.Any options = 2; + } + // Runtime specifies which runtime to use for executing this container. + Runtime runtime = 2; + // Spec is sandbox configuration (kin of OCI runtime spec), spec's data will be written to a config.json file in the + // bundle directory (similary to OCI spec). + google.protobuf.Any spec = 3; + // Labels provides an area to include arbitrary data on containers. + map<string, string> labels = 4; + // CreatedAt is the time the container was first created. + google.protobuf.Timestamp created_at = 5; + // UpdatedAt is the last time the container was mutated. + google.protobuf.Timestamp updated_at = 6; + // Extensions allow clients to provide optional blobs that can be handled by runtime. + map<string, google.protobuf.Any> extensions = 7; +} diff --git a/vendor/github.com/containerd/containerd/api/types/task/task.pb.go b/vendor/github.com/containerd/containerd/api/types/task/task.pb.go index f511bbd058c85..5c58d1ef18701 100644 --- a/vendor/github.com/containerd/containerd/api/types/task/task.pb.go +++ b/vendor/github.com/containerd/containerd/api/types/task/task.pb.go @@ -1,980 +1,406 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/api/types/task/task.proto package task import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types "github.com/gogo/protobuf/types" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" - strings "strings" - time "time" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type Status int32 const ( - StatusUnknown Status = 0 - StatusCreated Status = 1 - StatusRunning Status = 2 - StatusStopped Status = 3 - StatusPaused Status = 4 - StatusPausing Status = 5 + Status_UNKNOWN Status = 0 + Status_CREATED Status = 1 + Status_RUNNING Status = 2 + Status_STOPPED Status = 3 + Status_PAUSED Status = 4 + Status_PAUSING Status = 5 ) -var Status_name = map[int32]string{ - 0: "UNKNOWN", - 1: "CREATED", - 2: "RUNNING", - 3: "STOPPED", - 4: "PAUSED", - 5: "PAUSING", -} +// Enum value maps for Status. +var ( + Status_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CREATED", + 2: "RUNNING", + 3: "STOPPED", + 4: "PAUSED", + 5: "PAUSING", + } + Status_value = map[string]int32{ + "UNKNOWN": 0, + "CREATED": 1, + "RUNNING": 2, + "STOPPED": 3, + "PAUSED": 4, + "PAUSING": 5, + } +) -var Status_value = map[string]int32{ - "UNKNOWN": 0, - "CREATED": 1, - "RUNNING": 2, - "STOPPED": 3, - "PAUSED": 4, - "PAUSING": 5, +func (x Status) Enum() *Status { + p := new(Status) + *p = x + return p } func (x Status) String() string { - return proto.EnumName(Status_name, int32(x)) + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } -func (Status) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_391ef18c8ab0dc16, []int{0} +func (Status) Descriptor() protoreflect.EnumDescriptor { + return file_github_com_containerd_containerd_api_types_task_task_proto_enumTypes[0].Descriptor() } -type Process struct { - ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` - ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` - Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` - Status Status `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"` - Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"` - Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"` - Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"` - Terminal bool `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"` - ExitStatus uint32 `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` - ExitedAt time.Time `protobuf:"bytes,10,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Process) Reset() { *m = Process{} } -func (*Process) ProtoMessage() {} -func (*Process) Descriptor() ([]byte, []int) { - return fileDescriptor_391ef18c8ab0dc16, []int{0} -} -func (m *Process) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Process) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Process.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Process) XXX_Merge(src 
proto.Message) { - xxx_messageInfo_Process.Merge(m, src) +func (Status) Type() protoreflect.EnumType { + return &file_github_com_containerd_containerd_api_types_task_task_proto_enumTypes[0] } -func (m *Process) XXX_Size() int { - return m.Size() + +func (x Status) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) } -func (m *Process) XXX_DiscardUnknown() { - xxx_messageInfo_Process.DiscardUnknown(m) + +// Deprecated: Use Status.Descriptor instead. +func (Status) EnumDescriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_task_task_proto_rawDescGZIP(), []int{0} } -var xxx_messageInfo_Process proto.InternalMessageInfo +type Process struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -type ProcessInfo struct { - // PID is the process ID. - Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` - // Info contains additional process information. - // - // Info varies by platform. - Info *types.Any `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` + Status Status `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"` + Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"` + Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"` + Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"` + Terminal bool `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"` + ExitStatus uint32 `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` + ExitedAt *timestamppb.Timestamp `protobuf:"bytes,10,opt,name=exited_at,json=exitedAt,proto3" json:"exited_at,omitempty"` } -func (m *ProcessInfo) Reset() { *m = ProcessInfo{} } -func (*ProcessInfo) ProtoMessage() {} -func (*ProcessInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_391ef18c8ab0dc16, []int{1} -} -func (m *ProcessInfo) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProcessInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProcessInfo.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *Process) Reset() { + *x = Process{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_task_task_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ProcessInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProcessInfo.Merge(m, src) -} -func (m *ProcessInfo) XXX_Size() int { - return m.Size() -} -func (m *ProcessInfo) XXX_DiscardUnknown() { - xxx_messageInfo_ProcessInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_ProcessInfo proto.InternalMessageInfo - -func init() { - proto.RegisterEnum("containerd.v1.types.Status", Status_name, Status_value) - proto.RegisterType((*Process)(nil), "containerd.v1.types.Process") - proto.RegisterType((*ProcessInfo)(nil), 
"containerd.v1.types.ProcessInfo") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/types/task/task.proto", fileDescriptor_391ef18c8ab0dc16) -} - -var fileDescriptor_391ef18c8ab0dc16 = []byte{ - // 545 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x3f, 0x6f, 0xd3, 0x40, - 0x18, 0xc6, 0x7d, 0x6e, 0xeb, 0xa6, 0xe7, 0xb6, 0x18, 0x13, 0x55, 0xc6, 0x20, 0xdb, 0xea, 0x64, - 0x31, 0xd8, 0x22, 0xdd, 0xd8, 0xf2, 0x4f, 0xc8, 0x42, 0x72, 0x23, 0x27, 0x11, 0x6c, 0x91, 0x13, - 0x5f, 0xcc, 0xa9, 0xcd, 0x9d, 0x65, 0x9f, 0x81, 0x6c, 0x8c, 0xa8, 0x13, 0x5f, 0xa0, 0x13, 0x7c, - 0x0a, 0x3e, 0x41, 0x46, 0x26, 0xc4, 0x14, 0xa8, 0x3f, 0x09, 0x3a, 0xdb, 0x49, 0x23, 0x60, 0x39, - 0xbd, 0xef, 0xf3, 0x7b, 0xee, 0xbd, 0xf7, 0x1e, 0xf8, 0x22, 0xc6, 0xec, 0x6d, 0x3e, 0x75, 0x66, - 0x74, 0xe1, 0xce, 0x28, 0x61, 0x21, 0x26, 0x28, 0x8d, 0x76, 0xcb, 0x30, 0xc1, 0x2e, 0x5b, 0x26, - 0x28, 0x73, 0x59, 0x98, 0x5d, 0x95, 0x87, 0x93, 0xa4, 0x94, 0x51, 0xf5, 0xd1, 0xbd, 0xcb, 0x79, - 0xf7, 0xdc, 0x29, 0x4d, 0x7a, 0x33, 0xa6, 0x31, 0x2d, 0xb9, 0xcb, 0xab, 0xca, 0xaa, 0x9b, 0x31, - 0xa5, 0xf1, 0x35, 0x72, 0xcb, 0x6e, 0x9a, 0xcf, 0x5d, 0x86, 0x17, 0x28, 0x63, 0xe1, 0x22, 0xa9, - 0x0d, 0x8f, 0xff, 0x36, 0x84, 0x64, 0x59, 0xa1, 0xf3, 0x42, 0x84, 0x87, 0x83, 0x94, 0xce, 0x50, - 0x96, 0xa9, 0x2d, 0x78, 0xbc, 0x7d, 0x74, 0x82, 0x23, 0x0d, 0x58, 0xc0, 0x3e, 0xea, 0x3c, 0x28, - 0xd6, 0xa6, 0xdc, 0xdd, 0xe8, 0x5e, 0x2f, 0x90, 0xb7, 0x26, 0x2f, 0x52, 0xcf, 0xa0, 0x88, 0x23, - 0x4d, 0x2c, 0x9d, 0x52, 0xb1, 0x36, 0x45, 0xaf, 0x17, 0x88, 0x38, 0x52, 0x15, 0xb8, 0x97, 0xe0, - 0x48, 0xdb, 0xb3, 0x80, 0x7d, 0x12, 0xf0, 0x52, 0xbd, 0x80, 0x52, 0xc6, 0x42, 0x96, 0x67, 0xda, - 0xbe, 0x05, 0xec, 0xd3, 0xd6, 0x13, 0xe7, 0x3f, 0x3f, 0x74, 0x86, 0xa5, 0x25, 0xa8, 0xad, 0x6a, - 0x13, 0x1e, 0x64, 0x2c, 0xc2, 0x44, 0x3b, 0xe0, 0x2f, 0x04, 0x55, 0xa3, 0x9e, 0xf1, 0x51, 0x11, - 0xcd, 0x99, 0x26, 0x95, 0x72, 0xdd, 0xd5, 0x3a, 0x4a, 0x53, 0xed, 0x70, 0xab, 0xa3, 0x34, 0x55, - 0x75, 0xd8, 0x60, 0x28, 0x5d, 0x60, 0x12, 0x5e, 0x6b, 0x0d, 0x0b, 0xd8, 0x8d, 0x60, 0xdb, 0xab, - 0x26, 0x94, 0xd1, 0x07, 0xcc, 0x26, 0xf5, 0x6e, 0x47, 0xe5, 0xc2, 0x90, 0x4b, 0xd5, 0x2a, 0x6a, - 0x1b, 0x1e, 0xf1, 0x0e, 0x45, 0x93, 0x90, 0x69, 0xd0, 0x02, 0xb6, 0xdc, 0xd2, 0x9d, 0x2a, 0x50, - 0x67, 0x13, 0xa8, 0x33, 0xda, 0x24, 0xde, 0x69, 0xac, 0xd6, 0xa6, 0xf0, 0xf9, 0x97, 0x09, 0x82, - 0x46, 0x75, 0xad, 0xcd, 0xce, 0x3d, 0x28, 0xd7, 0x19, 0x7b, 0x64, 0x4e, 0x37, 0xd9, 0x80, 0xfb, - 0x6c, 0x6c, 0xb8, 0x8f, 0xc9, 0x9c, 0x96, 0x39, 0xca, 0xad, 0xe6, 0x3f, 0xe3, 0xdb, 0x64, 0x19, - 0x94, 0x8e, 0x67, 0x3f, 0x00, 0x94, 0xea, 0xc5, 0x0c, 0x78, 0x38, 0xf6, 0x5f, 0xf9, 0x97, 0xaf, - 0x7d, 0x45, 0xd0, 0x1f, 0xde, 0xdc, 0x5a, 0x27, 0x15, 0x18, 0x93, 0x2b, 0x42, 0xdf, 0x13, 0xce, - 0xbb, 0x41, 0xbf, 0x3d, 0xea, 0xf7, 0x14, 0xb0, 0xcb, 0xbb, 0x29, 0x0a, 0x19, 0x8a, 0x38, 0x0f, - 0xc6, 0xbe, 0xef, 0xf9, 0x2f, 0x15, 0x71, 0x97, 0x07, 0x39, 0x21, 0x98, 0xc4, 0x9c, 0x0f, 0x47, - 0x97, 0x83, 0x41, 0xbf, 0xa7, 0xec, 0xed, 0xf2, 0x21, 0xa3, 0x49, 0x82, 0x22, 0xf5, 0x29, 0x94, - 0x06, 0xed, 0xf1, 0xb0, 0xdf, 0x53, 0xf6, 0x75, 0xe5, 0xe6, 0xd6, 0x3a, 0xae, 0xf0, 0x20, 0xcc, - 0xb3, 0x6a, 0x3a, 0xa7, 0x7c, 0xfa, 0xc1, 0xee, 0x6d, 0x8e, 0x31, 0x89, 0xf5, 0xd3, 0x4f, 0x5f, - 0x0c, 0xe1, 0xdb, 0x57, 0xa3, 0xfe, 0x4d, 0x47, 0x5b, 0xdd, 0x19, 0xc2, 0xcf, 0x3b, 0x43, 0xf8, - 0x58, 0x18, 0x60, 0x55, 0x18, 0xe0, 0x7b, 0x61, 0x80, 0xdf, 0x85, 0x01, 0xde, 0x08, 0x53, 0xa9, - 0x0c, 0xe2, 0xe2, 0x4f, 0x00, 0x00, 0x00, 0xff, 
0xff, 0xc3, 0x32, 0xd2, 0x86, 0x50, 0x03, 0x00, - 0x00, -} - -func (m *Process) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} -func (m *Process) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *Process) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Process) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt):]) - if err1 != nil { - return 0, err1 - } - i -= n1 - i = encodeVarintTask(dAtA, i, uint64(n1)) - i-- - dAtA[i] = 0x52 - if m.ExitStatus != 0 { - i = encodeVarintTask(dAtA, i, uint64(m.ExitStatus)) - i-- - dAtA[i] = 0x48 - } - if m.Terminal { - i-- - if m.Terminal { - dAtA[i] = 1 - } else { - dAtA[i] = 0 +func (*Process) ProtoMessage() {} + +func (x *Process) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_task_task_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - i-- - dAtA[i] = 0x40 - } - if len(m.Stderr) > 0 { - i -= len(m.Stderr) - copy(dAtA[i:], m.Stderr) - i = encodeVarintTask(dAtA, i, uint64(len(m.Stderr))) - i-- - dAtA[i] = 0x3a - } - if len(m.Stdout) > 0 { - i -= len(m.Stdout) - copy(dAtA[i:], m.Stdout) - i = encodeVarintTask(dAtA, i, uint64(len(m.Stdout))) - i-- - dAtA[i] = 0x32 - } - if len(m.Stdin) > 0 { - i -= len(m.Stdin) - copy(dAtA[i:], m.Stdin) - i = encodeVarintTask(dAtA, i, uint64(len(m.Stdin))) - i-- - dAtA[i] = 0x2a - } - if m.Status != 0 { - i = encodeVarintTask(dAtA, i, uint64(m.Status)) - i-- - dAtA[i] = 0x20 - } - if m.Pid != 0 { - i = encodeVarintTask(dAtA, i, uint64(m.Pid)) - i-- - dAtA[i] = 0x18 + return ms } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return mi.MessageOf(x) } -func (m *ProcessInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +// Deprecated: Use Process.ProtoReflect.Descriptor instead. 
+func (*Process) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_task_task_proto_rawDescGZIP(), []int{0} } -func (m *ProcessInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *Process) GetContainerID() string { + if x != nil { + return x.ContainerID + } + return "" } -func (m *ProcessInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) +func (x *Process) GetID() string { + if x != nil { + return x.ID } - if m.Info != nil { - { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTask(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Pid != 0 { - i = encodeVarintTask(dAtA, i, uint64(m.Pid)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil + return "" } -func encodeVarintTask(dAtA []byte, offset int, v uint64) int { - offset -= sovTask(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (x *Process) GetPid() uint32 { + if x != nil { + return x.Pid } - dAtA[offset] = uint8(v) - return base + return 0 } -func (m *Process) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - l = len(m.ID) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - if m.Pid != 0 { - n += 1 + sovTask(uint64(m.Pid)) - } - if m.Status != 0 { - n += 1 + sovTask(uint64(m.Status)) - } - l = len(m.Stdin) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - l = len(m.Stdout) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - l = len(m.Stderr) - if l > 0 { - n += 1 + l + sovTask(uint64(l)) - } - if m.Terminal { - n += 2 - } - if m.ExitStatus != 0 { - n += 1 + sovTask(uint64(m.ExitStatus)) + +func (x *Process) GetStatus() Status { + if x != nil { + return x.Status } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt) - n += 1 + l + sovTask(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + return Status_UNKNOWN +} + +func (x *Process) GetStdin() string { + if x != nil { + return x.Stdin } - return n + return "" } -func (m *ProcessInfo) Size() (n int) { - if m == nil { - return 0 +func (x *Process) GetStdout() string { + if x != nil { + return x.Stdout } - var l int - _ = l - if m.Pid != 0 { - n += 1 + sovTask(uint64(m.Pid)) + return "" +} + +func (x *Process) GetStderr() string { + if x != nil { + return x.Stderr } - if m.Info != nil { - l = m.Info.Size() - n += 1 + l + sovTask(uint64(l)) + return "" +} + +func (x *Process) GetTerminal() bool { + if x != nil { + return x.Terminal } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + return false +} + +func (x *Process) GetExitStatus() uint32 { + if x != nil { + return x.ExitStatus } - return n + return 0 } -func sovTask(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 +func (x *Process) GetExitedAt() *timestamppb.Timestamp { + if x != nil { + return x.ExitedAt + } + return nil } -func sozTask(x uint64) (n int) { - return sovTask(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + +type ProcessInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // PID is the process ID. 
+ Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` + // Info contains additional process information. + // + // Info varies by platform. + Info *anypb.Any `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` } -func (this *Process) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Process{`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, - `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, - `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, - `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, - `ExitedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExitedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ProcessInfo) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ProcessInfo{`, - `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `Info:` + strings.Replace(fmt.Sprintf("%v", this.Info), "Any", "types.Any", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringTask(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + +func (x *ProcessInfo) Reset() { + *x = ProcessInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_task_task_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Process) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Process: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Process: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) - } - m.Pid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - m.Status = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Status |= Status(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdin = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdout = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stderr = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Terminal = bool(v != 0) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong 
wireType = %d for field ExitStatus", wireType) - } - m.ExitStatus = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExitStatus |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTask(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTask - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy +} + +func (x *ProcessInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProcessInfo) ProtoMessage() {} + +func (x *ProcessInfo) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_task_task_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } + return mi.MessageOf(x) +} - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil +// Deprecated: Use ProcessInfo.ProtoReflect.Descriptor instead. 
+func (*ProcessInfo) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_task_task_proto_rawDescGZIP(), []int{1} } -func (m *ProcessInfo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProcessInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProcessInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) - } - m.Pid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTask - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTask - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTask - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Info == nil { - m.Info = &types.Any{} - } - if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTask(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTask - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } + +func (x *ProcessInfo) GetPid() uint32 { + if x != nil { + return x.Pid } + return 0 +} - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *ProcessInfo) GetInfo() *anypb.Any { + if x != nil { + return x.Info } return nil } -func skipTask(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTask - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + +var File_github_com_containerd_containerd_api_types_task_task_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_types_task_task_proto_rawDesc = []byte{ + 0x0a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x61, 0x73, + 0x6b, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbf, 0x02, + 0x0a, 0x07, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, + 0x70, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x33, + 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x64, 0x69, 0x6e, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x64, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x64, + 0x6f, 0x75, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x64, 0x6f, 0x75, + 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x64, 0x65, 0x72, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x73, 0x74, 0x64, 0x65, 0x72, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x74, 0x65, 0x72, + 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x74, 0x65, 0x72, + 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x69, 0x74, 0x5f, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x65, 0x78, 0x69, 0x74, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x09, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, + 0x5f, 0x61, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 
0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, + 0x49, 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x10, + 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, + 0x12, 0x28, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x2a, 0x55, 0x0a, 0x06, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, + 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0b, + 0x0a, 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x53, + 0x54, 0x4f, 0x50, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x41, 0x55, 0x53, + 0x45, 0x44, 0x10, 0x04, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x41, 0x55, 0x53, 0x49, 0x4e, 0x47, 0x10, + 0x05, 0x42, 0x31, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, + 0x74, 0x61, 0x73, 0x6b, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_github_com_containerd_containerd_api_types_task_task_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_types_task_task_proto_rawDescData = file_github_com_containerd_containerd_api_types_task_task_proto_rawDesc +) + +func file_github_com_containerd_containerd_api_types_task_task_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_types_task_task_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_types_task_task_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_task_task_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_types_task_task_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_types_task_task_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_github_com_containerd_containerd_api_types_task_task_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_github_com_containerd_containerd_api_types_task_task_proto_goTypes = []interface{}{ + (Status)(0), // 0: containerd.v1.types.Status + (*Process)(nil), // 1: containerd.v1.types.Process + (*ProcessInfo)(nil), // 2: containerd.v1.types.ProcessInfo + (*timestamppb.Timestamp)(nil), // 3: google.protobuf.Timestamp + (*anypb.Any)(nil), // 4: google.protobuf.Any +} +var file_github_com_containerd_containerd_api_types_task_task_proto_depIdxs = []int32{ + 0, // 0: containerd.v1.types.Process.status:type_name -> containerd.v1.types.Status + 3, // 1: containerd.v1.types.Process.exited_at:type_name -> google.protobuf.Timestamp + 4, // 2: containerd.v1.types.ProcessInfo.info:type_name -> google.protobuf.Any + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_types_task_task_proto_init() } +func file_github_com_containerd_containerd_api_types_task_task_proto_init() { + if 
File_github_com_containerd_containerd_api_types_task_task_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_types_task_task_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Process); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTask - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTask - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthTask - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupTask + file_github_com_containerd_containerd_api_types_task_task_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProcessInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthTask - } - if depth == 0 { - return iNdEx, nil } } - return 0, io.ErrUnexpectedEOF + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_types_task_task_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_types_task_task_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_types_task_task_proto_depIdxs, + EnumInfos: file_github_com_containerd_containerd_api_types_task_task_proto_enumTypes, + MessageInfos: file_github_com_containerd_containerd_api_types_task_task_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_types_task_task_proto = out.File + file_github_com_containerd_containerd_api_types_task_task_proto_rawDesc = nil + file_github_com_containerd_containerd_api_types_task_task_proto_goTypes = nil + file_github_com_containerd_containerd_api_types_task_task_proto_depIdxs = nil } - -var ( - ErrInvalidLengthTask = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTask = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupTask = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/containerd/containerd/api/types/task/task.proto b/vendor/github.com/containerd/containerd/api/types/task/task.proto index df08dfd99d943..afc8e94bb4770 100644 --- a/vendor/github.com/containerd/containerd/api/types/task/task.proto +++ b/vendor/github.com/containerd/containerd/api/types/task/task.proto @@ -18,20 +18,18 @@ syntax = "proto3"; package containerd.v1.types; -import weak "gogoproto/gogo.proto"; import "google/protobuf/timestamp.proto"; import "google/protobuf/any.proto"; +option go_package = "github.com/containerd/containerd/api/types/task"; + enum Status { - option (gogoproto.goproto_enum_prefix) = false; - option 
(gogoproto.enum_customname) = "Status"; - - UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "StatusUnknown"]; - CREATED = 1 [(gogoproto.enumvalue_customname) = "StatusCreated"]; - RUNNING = 2 [(gogoproto.enumvalue_customname) = "StatusRunning"]; - STOPPED = 3 [(gogoproto.enumvalue_customname) = "StatusStopped"]; - PAUSED = 4 [(gogoproto.enumvalue_customname) = "StatusPaused"]; - PAUSING = 5 [(gogoproto.enumvalue_customname) = "StatusPausing"]; + UNKNOWN = 0; + CREATED = 1; + RUNNING = 2; + STOPPED = 3; + PAUSED = 4; + PAUSING = 5; } message Process { @@ -44,7 +42,7 @@ message Process { string stderr = 7; bool terminal = 8; uint32 exit_status = 9; - google.protobuf.Timestamp exited_at = 10 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; + google.protobuf.Timestamp exited_at = 10; } message ProcessInfo { diff --git a/vendor/github.com/containerd/containerd/api/types/transfer/doc.go b/vendor/github.com/containerd/containerd/api/types/transfer/doc.go new file mode 100644 index 0000000000000..82f94c2f15d78 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/transfer/doc.go @@ -0,0 +1,18 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package transfer defines the transfer types. +package transfer diff --git a/vendor/github.com/containerd/containerd/api/types/transfer/imagestore.pb.go b/vendor/github.com/containerd/containerd/api/types/transfer/imagestore.pb.go new file mode 100644 index 0000000000000..b3c9830411c5e --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/transfer/imagestore.pb.go @@ -0,0 +1,451 @@ +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 +// source: github.com/containerd/containerd/api/types/transfer/imagestore.proto + +package transfer + +import ( + types "github.com/containerd/containerd/api/types" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ImageStore struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Platforms []*types.Platform `protobuf:"bytes,3,rep,name=platforms,proto3" json:"platforms,omitempty"` + AllMetadata bool `protobuf:"varint,4,opt,name=all_metadata,json=allMetadata,proto3" json:"all_metadata,omitempty"` + ManifestLimit uint32 `protobuf:"varint,5,opt,name=manifest_limit,json=manifestLimit,proto3" json:"manifest_limit,omitempty"` + // extra_references are used to set image names on imports of sub-images from the index + ExtraReferences []*ImageReference `protobuf:"bytes,6,rep,name=extra_references,json=extraReferences,proto3" json:"extra_references,omitempty"` + Unpacks []*UnpackConfiguration `protobuf:"bytes,10,rep,name=unpacks,proto3" json:"unpacks,omitempty"` +} + +func (x *ImageStore) Reset() { + *x = ImageStore{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImageStore) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImageStore) ProtoMessage() {} + +func (x *ImageStore) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImageStore.ProtoReflect.Descriptor instead. 
+func (*ImageStore) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDescGZIP(), []int{0} +} + +func (x *ImageStore) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ImageStore) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *ImageStore) GetPlatforms() []*types.Platform { + if x != nil { + return x.Platforms + } + return nil +} + +func (x *ImageStore) GetAllMetadata() bool { + if x != nil { + return x.AllMetadata + } + return false +} + +func (x *ImageStore) GetManifestLimit() uint32 { + if x != nil { + return x.ManifestLimit + } + return 0 +} + +func (x *ImageStore) GetExtraReferences() []*ImageReference { + if x != nil { + return x.ExtraReferences + } + return nil +} + +func (x *ImageStore) GetUnpacks() []*UnpackConfiguration { + if x != nil { + return x.Unpacks + } + return nil +} + +type UnpackConfiguration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // platform is the platform to unpack for, used for resolving manifest and snapshotter + // if not provided + Platform *types.Platform `protobuf:"bytes,1,opt,name=platform,proto3" json:"platform,omitempty"` + // snapshotter to unpack to, if not provided default for platform shoudl be used + Snapshotter string `protobuf:"bytes,2,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"` +} + +func (x *UnpackConfiguration) Reset() { + *x = UnpackConfiguration{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UnpackConfiguration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UnpackConfiguration) ProtoMessage() {} + +func (x *UnpackConfiguration) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UnpackConfiguration.ProtoReflect.Descriptor instead. +func (*UnpackConfiguration) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDescGZIP(), []int{1} +} + +func (x *UnpackConfiguration) GetPlatform() *types.Platform { + if x != nil { + return x.Platform + } + return nil +} + +func (x *UnpackConfiguration) GetSnapshotter() string { + if x != nil { + return x.Snapshotter + } + return "" +} + +// ImageReference is used to create or find a reference for an image +type ImageReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // is_prefix determines whether the Name should be considered + // a prefix (without tag or digest). + // For lookup, this may allow matching multiple tags. + // For store, this must have a tag or digest added. + IsPrefix bool `protobuf:"varint,2,opt,name=is_prefix,json=isPrefix,proto3" json:"is_prefix,omitempty"` + // allow_overwrite allows overwriting or ignoring the name if + // another reference is provided (such as through an annotation). + // Only used if IsPrefix is true. 
+ AllowOverwrite bool `protobuf:"varint,3,opt,name=allow_overwrite,json=allowOverwrite,proto3" json:"allow_overwrite,omitempty"` + // add_digest adds the manifest digest to the reference. + // For lookup, this allows matching tags with any digest. + // For store, this allows adding the digest to the name. + // Only used if IsPrefix is true. + AddDigest bool `protobuf:"varint,4,opt,name=add_digest,json=addDigest,proto3" json:"add_digest,omitempty"` + // skip_named_digest only considers digest references which do not + // have a non-digested named reference. + // For lookup, this will deduplicate digest references when there is a named match. + // For store, this only adds this digest reference when there is no matching full + // name reference from the prefix. + // Only used if IsPrefix is true. + SkipNamedDigest bool `protobuf:"varint,5,opt,name=skip_named_digest,json=skipNamedDigest,proto3" json:"skip_named_digest,omitempty"` +} + +func (x *ImageReference) Reset() { + *x = ImageReference{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImageReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImageReference) ProtoMessage() {} + +func (x *ImageReference) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImageReference.ProtoReflect.Descriptor instead. 
+func (*ImageReference) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDescGZIP(), []int{2} +} + +func (x *ImageReference) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ImageReference) GetIsPrefix() bool { + if x != nil { + return x.IsPrefix + } + return false +} + +func (x *ImageReference) GetAllowOverwrite() bool { + if x != nil { + return x.AllowOverwrite + } + return false +} + +func (x *ImageReference) GetAddDigest() bool { + if x != nil { + return x.AddDigest + } + return false +} + +func (x *ImageReference) GetSkipNamedDigest() bool { + if x != nil { + return x.SkipNamedDigest + } + return false +} + +var File_github_com_containerd_containerd_api_types_transfer_imagestore_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDesc = []byte{ + 0x0a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, + 0x72, 0x1a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x70, 0x6c, + 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xca, 0x03, 0x0a, + 0x0a, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x49, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x31, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2e, 0x49, 0x6d, 0x61, 0x67, + 0x65, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x70, 0x6c, + 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x09, 0x70, 0x6c, 0x61, 0x74, 0x66, + 0x6f, 0x72, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x6c, 0x6c, 0x5f, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x61, 0x6c, 0x6c, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x6e, 0x69, 0x66, + 0x65, 0x73, 0x74, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0d, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x54, + 0x0a, 0x10, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x63, 0x6f, 
0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, + 0x73, 0x66, 0x65, 0x72, 0x2e, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, + 0x6e, 0x63, 0x65, 0x73, 0x12, 0x48, 0x0a, 0x07, 0x75, 0x6e, 0x70, 0x61, 0x63, 0x6b, 0x73, 0x18, + 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, + 0x72, 0x2e, 0x55, 0x6e, 0x70, 0x61, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x75, 0x6e, 0x70, 0x61, 0x63, 0x6b, 0x73, 0x1a, 0x39, + 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x6f, 0x0a, 0x13, 0x55, 0x6e, 0x70, + 0x61, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x36, 0x0a, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x08, + 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x74, 0x65, 0x72, 0x22, 0xb5, 0x01, 0x0a, 0x0e, 0x49, + 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x69, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x27, + 0x0a, 0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4f, 0x76, + 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x64, 0x64, 0x5f, 0x64, + 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x64, 0x64, + 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x44, 0x69, 0x67, 0x65, + 0x73, 0x74, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDescOnce sync.Once + 
file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDescData = file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDesc +) + +func file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_goTypes = []interface{}{ + (*ImageStore)(nil), // 0: containerd.types.transfer.ImageStore + (*UnpackConfiguration)(nil), // 1: containerd.types.transfer.UnpackConfiguration + (*ImageReference)(nil), // 2: containerd.types.transfer.ImageReference + nil, // 3: containerd.types.transfer.ImageStore.LabelsEntry + (*types.Platform)(nil), // 4: containerd.types.Platform +} +var file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_depIdxs = []int32{ + 3, // 0: containerd.types.transfer.ImageStore.labels:type_name -> containerd.types.transfer.ImageStore.LabelsEntry + 4, // 1: containerd.types.transfer.ImageStore.platforms:type_name -> containerd.types.Platform + 2, // 2: containerd.types.transfer.ImageStore.extra_references:type_name -> containerd.types.transfer.ImageReference + 1, // 3: containerd.types.transfer.ImageStore.unpacks:type_name -> containerd.types.transfer.UnpackConfiguration + 4, // 4: containerd.types.transfer.UnpackConfiguration.platform:type_name -> containerd.types.Platform + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_init() } +func file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_init() { + if File_github_com_containerd_containerd_api_types_transfer_imagestore_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImageStore); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UnpackConfiguration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImageReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + 
RawDescriptor: file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_types_transfer_imagestore_proto = out.File + file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_rawDesc = nil + file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_goTypes = nil + file_github_com_containerd_containerd_api_types_transfer_imagestore_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/types/transfer/imagestore.proto b/vendor/github.com/containerd/containerd/api/types/transfer/imagestore.proto new file mode 100644 index 0000000000000..57ac2ebde5daa --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/transfer/imagestore.proto @@ -0,0 +1,82 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +syntax = "proto3"; + +package containerd.types.transfer; + +import "github.com/containerd/containerd/api/types/platform.proto"; + +option go_package = "github.com/containerd/containerd/api/types/transfer"; + +message ImageStore { + string name = 1; + map<string, string> labels = 2; + + // Content filters + + repeated types.Platform platforms = 3; + bool all_metadata = 4; + uint32 manifest_limit = 5; + + // Import naming + + // extra_references are used to set image names on imports of sub-images from the index + repeated ImageReference extra_references = 6; + + // Unpack Configuration, multiple allowed + + repeated UnpackConfiguration unpacks = 10; +} + +message UnpackConfiguration { + // platform is the platform to unpack for, used for resolving manifest and snapshotter + // if not provided + types.Platform platform = 1; + + // snapshotter to unpack to, if not provided default for platform should be used + string snapshotter = 2; +} + +// ImageReference is used to create or find a reference for an image +message ImageReference { + string name = 1; + + // is_prefix determines whether the Name should be considered + // a prefix (without tag or digest). + // For lookup, this may allow matching multiple tags. + // For store, this must have a tag or digest added. + bool is_prefix = 2; + + // allow_overwrite allows overwriting or ignoring the name if + // another reference is provided (such as through an annotation). + // Only used if IsPrefix is true.
+ bool add_digest = 4; + + // skip_named_digest only considers digest references which do not + // have a non-digested named reference. + // For lookup, this will deduplicate digest references when there is a named match. + // For store, this only adds this digest reference when there is no matching full + // name reference from the prefix. + // Only used if IsPrefix is true. + bool skip_named_digest = 5; +} diff --git a/vendor/github.com/containerd/containerd/api/types/transfer/importexport.pb.go b/vendor/github.com/containerd/containerd/api/types/transfer/importexport.pb.go new file mode 100644 index 0000000000000..a2a48ac152aa0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/transfer/importexport.pb.go @@ -0,0 +1,320 @@ +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 +// source: github.com/containerd/containerd/api/types/transfer/importexport.proto + +package transfer + +import ( + types "github.com/containerd/containerd/api/types" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ImageImportStream struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Stream is used to identify the binary input stream for the import operation. + // The stream uses the transfer binary stream protocol with the client as the sender. + // The binary data is expected to be a raw tar stream. 
+ Stream string `protobuf:"bytes,1,opt,name=stream,proto3" json:"stream,omitempty"` + MediaType string `protobuf:"bytes,2,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` + ForceCompress bool `protobuf:"varint,3,opt,name=force_compress,json=forceCompress,proto3" json:"force_compress,omitempty"` +} + +func (x *ImageImportStream) Reset() { + *x = ImageImportStream{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_transfer_importexport_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImageImportStream) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImageImportStream) ProtoMessage() {} + +func (x *ImageImportStream) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_transfer_importexport_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImageImportStream.ProtoReflect.Descriptor instead. +func (*ImageImportStream) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDescGZIP(), []int{0} +} + +func (x *ImageImportStream) GetStream() string { + if x != nil { + return x.Stream + } + return "" +} + +func (x *ImageImportStream) GetMediaType() string { + if x != nil { + return x.MediaType + } + return "" +} + +func (x *ImageImportStream) GetForceCompress() bool { + if x != nil { + return x.ForceCompress + } + return false +} + +type ImageExportStream struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Stream is used to identify the binary output stream for the export operation. + // The stream uses the transfer binary stream protocol with the server as the sender. + // The binary data is expected to be a raw tar stream. + Stream string `protobuf:"bytes,1,opt,name=stream,proto3" json:"stream,omitempty"` + MediaType string `protobuf:"bytes,2,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` + // The specified platforms + Platforms []*types.Platform `protobuf:"bytes,3,rep,name=platforms,proto3" json:"platforms,omitempty"` + // Whether to include all platforms + AllPlatforms bool `protobuf:"varint,4,opt,name=all_platforms,json=allPlatforms,proto3" json:"all_platforms,omitempty"` + // Skips the creation of the Docker compatible manifest.json file + SkipCompatibilityManifest bool `protobuf:"varint,5,opt,name=skip_compatibility_manifest,json=skipCompatibilityManifest,proto3" json:"skip_compatibility_manifest,omitempty"` + // Excludes non-distributable blobs such as Windows base layers. 
+ SkipNonDistributable bool `protobuf:"varint,6,opt,name=skip_non_distributable,json=skipNonDistributable,proto3" json:"skip_non_distributable,omitempty"` +} + +func (x *ImageExportStream) Reset() { + *x = ImageExportStream{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_transfer_importexport_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ImageExportStream) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ImageExportStream) ProtoMessage() {} + +func (x *ImageExportStream) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_transfer_importexport_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ImageExportStream.ProtoReflect.Descriptor instead. +func (*ImageExportStream) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDescGZIP(), []int{1} +} + +func (x *ImageExportStream) GetStream() string { + if x != nil { + return x.Stream + } + return "" +} + +func (x *ImageExportStream) GetMediaType() string { + if x != nil { + return x.MediaType + } + return "" +} + +func (x *ImageExportStream) GetPlatforms() []*types.Platform { + if x != nil { + return x.Platforms + } + return nil +} + +func (x *ImageExportStream) GetAllPlatforms() bool { + if x != nil { + return x.AllPlatforms + } + return false +} + +func (x *ImageExportStream) GetSkipCompatibilityManifest() bool { + if x != nil { + return x.SkipCompatibilityManifest + } + return false +} + +func (x *ImageExportStream) GetSkipNonDistributable() bool { + if x != nil { + return x.SkipNonDistributable + } + return false +} + +var File_github_com_containerd_containerd_api_types_transfer_importexport_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDesc = []byte{ + 0x0a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x78, 0x70, 0x6f, + 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, + 0x66, 0x65, 0x72, 0x1a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, + 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x71, + 0x0a, 0x11, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, + 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x09, 0x6d, 0x65, 0x64, 0x69, 
0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x66, 0x6f, + 0x72, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0d, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, + 0x73, 0x22, 0x9f, 0x02, 0x0a, 0x11, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x45, 0x78, 0x70, 0x6f, 0x72, + 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, + 0x1d, 0x0a, 0x0a, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, + 0x0a, 0x09, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x09, 0x70, + 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x5f, + 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0c, 0x61, 0x6c, 0x6c, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x73, 0x12, 0x3e, 0x0a, + 0x1b, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x69, 0x6c, + 0x69, 0x74, 0x79, 0x5f, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x19, 0x73, 0x6b, 0x69, 0x70, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, + 0x69, 0x6c, 0x69, 0x74, 0x79, 0x4d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, 0x74, 0x12, 0x34, 0x0a, + 0x16, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x6e, 0x6f, 0x6e, 0x5f, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x73, + 0x6b, 0x69, 0x70, 0x4e, 0x6f, 0x6e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDescData = file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDesc +) + +func file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_types_transfer_importexport_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_github_com_containerd_containerd_api_types_transfer_importexport_proto_goTypes = []interface{}{ + (*ImageImportStream)(nil), // 0: containerd.types.transfer.ImageImportStream 
+ (*ImageExportStream)(nil), // 1: containerd.types.transfer.ImageExportStream + (*types.Platform)(nil), // 2: containerd.types.Platform +} +var file_github_com_containerd_containerd_api_types_transfer_importexport_proto_depIdxs = []int32{ + 2, // 0: containerd.types.transfer.ImageExportStream.platforms:type_name -> containerd.types.Platform + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_types_transfer_importexport_proto_init() } +func file_github_com_containerd_containerd_api_types_transfer_importexport_proto_init() { + if File_github_com_containerd_containerd_api_types_transfer_importexport_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_types_transfer_importexport_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImageImportStream); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_types_transfer_importexport_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ImageExportStream); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_types_transfer_importexport_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_types_transfer_importexport_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_types_transfer_importexport_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_types_transfer_importexport_proto = out.File + file_github_com_containerd_containerd_api_types_transfer_importexport_proto_rawDesc = nil + file_github_com_containerd_containerd_api_types_transfer_importexport_proto_goTypes = nil + file_github_com_containerd_containerd_api_types_transfer_importexport_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/types/transfer/importexport.proto b/vendor/github.com/containerd/containerd/api/types/transfer/importexport.proto new file mode 100644 index 0000000000000..c18bae1c6405d --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/transfer/importexport.proto @@ -0,0 +1,52 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +syntax = "proto3"; + +package containerd.types.transfer; + +option go_package = "github.com/containerd/containerd/api/types/transfer"; + +import "github.com/containerd/containerd/api/types/platform.proto"; + +message ImageImportStream { + // Stream is used to identify the binary input stream for the import operation. + // The stream uses the transfer binary stream protocol with the client as the sender. + // The binary data is expected to be a raw tar stream. + string stream = 1; + + string media_type = 2; + + bool force_compress = 3; +} + +message ImageExportStream { + // Stream is used to identify the binary output stream for the export operation. + // The stream uses the transfer binary stream protocol with the server as the sender. + // The binary data is expected to be a raw tar stream. + string stream = 1; + + string media_type = 2; + + // The specified platforms + repeated types.Platform platforms = 3; + // Whether to include all platforms + bool all_platforms = 4; + // Skips the creation of the Docker compatible manifest.json file + bool skip_compatibility_manifest = 5; + // Excludes non-distributable blobs such as Windows base layers. + bool skip_non_distributable = 6; +} diff --git a/vendor/github.com/containerd/containerd/api/types/transfer/progress.pb.go b/vendor/github.com/containerd/containerd/api/types/transfer/progress.pb.go new file mode 100644 index 0000000000000..62d12b34db379 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/transfer/progress.pb.go @@ -0,0 +1,202 @@ +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 +// source: github.com/containerd/containerd/api/types/transfer/progress.proto + +package transfer + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Progress struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Event string `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Parents []string `protobuf:"bytes,3,rep,name=parents,proto3" json:"parents,omitempty"` + Progress int64 `protobuf:"varint,4,opt,name=progress,proto3" json:"progress,omitempty"` + Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"` +} + +func (x *Progress) Reset() { + *x = Progress{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_transfer_progress_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Progress) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Progress) ProtoMessage() {} + +func (x *Progress) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_transfer_progress_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Progress.ProtoReflect.Descriptor instead. +func (*Progress) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDescGZIP(), []int{0} +} + +func (x *Progress) GetEvent() string { + if x != nil { + return x.Event + } + return "" +} + +func (x *Progress) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Progress) GetParents() []string { + if x != nil { + return x.Parents + } + return nil +} + +func (x *Progress) GetProgress() int64 { + if x != nil { + return x.Progress + } + return 0 +} + +func (x *Progress) GetTotal() int64 { + if x != nil { + return x.Total + } + return 0 +} + +var File_github_com_containerd_containerd_api_types_transfer_progress_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDesc = []byte{ + 0x0a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x22, + 0x80, 0x01, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, + 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05, + 0x74, 0x6f, 
0x74, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x74, + 0x61, 0x6c, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDescData = file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDesc +) + +func file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_types_transfer_progress_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_github_com_containerd_containerd_api_types_transfer_progress_proto_goTypes = []interface{}{ + (*Progress)(nil), // 0: containerd.types.transfer.Progress +} +var file_github_com_containerd_containerd_api_types_transfer_progress_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_types_transfer_progress_proto_init() } +func file_github_com_containerd_containerd_api_types_transfer_progress_proto_init() { + if File_github_com_containerd_containerd_api_types_transfer_progress_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_types_transfer_progress_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Progress); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_types_transfer_progress_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_types_transfer_progress_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_types_transfer_progress_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_types_transfer_progress_proto = out.File + file_github_com_containerd_containerd_api_types_transfer_progress_proto_rawDesc = nil + file_github_com_containerd_containerd_api_types_transfer_progress_proto_goTypes = nil + file_github_com_containerd_containerd_api_types_transfer_progress_proto_depIdxs = nil +} diff --git 
a/vendor/github.com/containerd/containerd/api/types/transfer/progress.proto b/vendor/github.com/containerd/containerd/api/types/transfer/progress.proto new file mode 100644 index 0000000000000..3059bcbb12a44 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/transfer/progress.proto @@ -0,0 +1,29 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +syntax = "proto3"; + +package containerd.types.transfer; + +option go_package = "github.com/containerd/containerd/api/types/transfer"; + +message Progress { + string event = 1; + string name = 2; + repeated string parents = 3; + int64 progress = 4; + int64 total = 5; +} diff --git a/vendor/github.com/containerd/containerd/api/types/transfer/registry.pb.go b/vendor/github.com/containerd/containerd/api/types/transfer/registry.pb.go new file mode 100644 index 0000000000000..57bb80bae9e54 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/transfer/registry.pb.go @@ -0,0 +1,518 @@ +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. +//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 +// source: github.com/containerd/containerd/api/types/transfer/registry.proto + +package transfer + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type AuthType int32 + +const ( + AuthType_NONE AuthType = 0 + // CREDENTIALS is used to exchange username/password for access token + // using an oauth or "Docker Registry Token" server + AuthType_CREDENTIALS AuthType = 1 + // REFRESH is used to exchange secret for access token using an oauth + // or "Docker Registry Token" server + AuthType_REFRESH AuthType = 2 + // HEADER is used to set the HTTP Authorization header to secret + // directly for the registry. + // Value should be ` ` + AuthType_HEADER AuthType = 3 +) + +// Enum value maps for AuthType. 
+var ( + AuthType_name = map[int32]string{ + 0: "NONE", + 1: "CREDENTIALS", + 2: "REFRESH", + 3: "HEADER", + } + AuthType_value = map[string]int32{ + "NONE": 0, + "CREDENTIALS": 1, + "REFRESH": 2, + "HEADER": 3, + } +) + +func (x AuthType) Enum() *AuthType { + p := new(AuthType) + *p = x + return p +} + +func (x AuthType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AuthType) Descriptor() protoreflect.EnumDescriptor { + return file_github_com_containerd_containerd_api_types_transfer_registry_proto_enumTypes[0].Descriptor() +} + +func (AuthType) Type() protoreflect.EnumType { + return &file_github_com_containerd_containerd_api_types_transfer_registry_proto_enumTypes[0] +} + +func (x AuthType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AuthType.Descriptor instead. +func (AuthType) EnumDescriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescGZIP(), []int{0} +} + +type OCIRegistry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Reference string `protobuf:"bytes,1,opt,name=reference,proto3" json:"reference,omitempty"` + Resolver *RegistryResolver `protobuf:"bytes,2,opt,name=resolver,proto3" json:"resolver,omitempty"` +} + +func (x *OCIRegistry) Reset() { + *x = OCIRegistry{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OCIRegistry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OCIRegistry) ProtoMessage() {} + +func (x *OCIRegistry) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OCIRegistry.ProtoReflect.Descriptor instead. +func (*OCIRegistry) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescGZIP(), []int{0} +} + +func (x *OCIRegistry) GetReference() string { + if x != nil { + return x.Reference + } + return "" +} + +func (x *OCIRegistry) GetResolver() *RegistryResolver { + if x != nil { + return x.Resolver + } + return nil +} + +type RegistryResolver struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // auth_stream is used to refer to a stream which auth callbacks may be + // made on. 
+ AuthStream string `protobuf:"bytes,1,opt,name=auth_stream,json=authStream,proto3" json:"auth_stream,omitempty"` + // Headers + Headers map[string]string `protobuf:"bytes,2,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *RegistryResolver) Reset() { + *x = RegistryResolver{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RegistryResolver) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RegistryResolver) ProtoMessage() {} + +func (x *RegistryResolver) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RegistryResolver.ProtoReflect.Descriptor instead. +func (*RegistryResolver) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescGZIP(), []int{1} +} + +func (x *RegistryResolver) GetAuthStream() string { + if x != nil { + return x.AuthStream + } + return "" +} + +func (x *RegistryResolver) GetHeaders() map[string]string { + if x != nil { + return x.Headers + } + return nil +} + +// AuthRequest is sent as a callback on a stream +type AuthRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // host is the registry host + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + // reference is the namespace and repository name requested from the registry + Reference string `protobuf:"bytes,2,opt,name=reference,proto3" json:"reference,omitempty"` + // wwwauthenticate is the HTTP WWW-Authenticate header values returned from the registry + Wwwauthenticate []string `protobuf:"bytes,3,rep,name=wwwauthenticate,proto3" json:"wwwauthenticate,omitempty"` +} + +func (x *AuthRequest) Reset() { + *x = AuthRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AuthRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AuthRequest) ProtoMessage() {} + +func (x *AuthRequest) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AuthRequest.ProtoReflect.Descriptor instead. 
+func (*AuthRequest) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescGZIP(), []int{2} +} + +func (x *AuthRequest) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (x *AuthRequest) GetReference() string { + if x != nil { + return x.Reference + } + return "" +} + +func (x *AuthRequest) GetWwwauthenticate() []string { + if x != nil { + return x.Wwwauthenticate + } + return nil +} + +type AuthResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AuthType AuthType `protobuf:"varint,1,opt,name=authType,proto3,enum=containerd.types.transfer.AuthType" json:"authType,omitempty"` + Secret string `protobuf:"bytes,2,opt,name=secret,proto3" json:"secret,omitempty"` + Username string `protobuf:"bytes,3,opt,name=username,proto3" json:"username,omitempty"` + ExpireAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=expire_at,json=expireAt,proto3" json:"expire_at,omitempty"` // TODO: Stream error +} + +func (x *AuthResponse) Reset() { + *x = AuthResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AuthResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AuthResponse) ProtoMessage() {} + +func (x *AuthResponse) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AuthResponse.ProtoReflect.Descriptor instead. 
+func (*AuthResponse) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescGZIP(), []int{3} +} + +func (x *AuthResponse) GetAuthType() AuthType { + if x != nil { + return x.AuthType + } + return AuthType_NONE +} + +func (x *AuthResponse) GetSecret() string { + if x != nil { + return x.Secret + } + return "" +} + +func (x *AuthResponse) GetUsername() string { + if x != nil { + return x.Username + } + return "" +} + +func (x *AuthResponse) GetExpireAt() *timestamppb.Timestamp { + if x != nil { + return x.ExpireAt + } + return nil +} + +var File_github_com_containerd_containerd_api_types_transfer_registry_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDesc = []byte{ + 0x0a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2f, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x74, 0x0a, 0x0b, 0x4f, 0x43, 0x49, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x72, 0x79, 0x12, + 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x47, 0x0a, + 0x08, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2b, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x67, 0x69, + 0x73, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x52, 0x08, 0x72, 0x65, + 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x22, 0xc3, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x67, 0x69, 0x73, + 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x0b, 0x61, + 0x75, 0x74, 0x68, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x52, 0x0a, 0x07, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x38, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, + 0x72, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, + 0x1a, 0x3a, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x69, 0x0a, 0x0b, + 
0x41, 0x75, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, + 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x28, 0x0a, + 0x0f, 0x77, 0x77, 0x77, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x77, 0x77, 0x77, 0x61, 0x75, 0x74, 0x68, 0x65, + 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x22, 0xbc, 0x01, 0x0a, 0x0c, 0x41, 0x75, 0x74, 0x68, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x08, 0x61, 0x75, 0x74, 0x68, + 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x08, 0x61, 0x75, 0x74, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x63, + 0x72, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, + 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, + 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x65, 0x78, + 0x70, 0x69, 0x72, 0x65, 0x41, 0x74, 0x2a, 0x3e, 0x0a, 0x08, 0x41, 0x75, 0x74, 0x68, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, + 0x43, 0x52, 0x45, 0x44, 0x45, 0x4e, 0x54, 0x49, 0x41, 0x4c, 0x53, 0x10, 0x01, 0x12, 0x0b, 0x0a, + 0x07, 0x52, 0x45, 0x46, 0x52, 0x45, 0x53, 0x48, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x48, 0x45, + 0x41, 0x44, 0x45, 0x52, 0x10, 0x03, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, + 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescData = file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDesc +) + +func file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_types_transfer_registry_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var 
file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_github_com_containerd_containerd_api_types_transfer_registry_proto_goTypes = []interface{}{ + (AuthType)(0), // 0: containerd.types.transfer.AuthType + (*OCIRegistry)(nil), // 1: containerd.types.transfer.OCIRegistry + (*RegistryResolver)(nil), // 2: containerd.types.transfer.RegistryResolver + (*AuthRequest)(nil), // 3: containerd.types.transfer.AuthRequest + (*AuthResponse)(nil), // 4: containerd.types.transfer.AuthResponse + nil, // 5: containerd.types.transfer.RegistryResolver.HeadersEntry + (*timestamppb.Timestamp)(nil), // 6: google.protobuf.Timestamp +} +var file_github_com_containerd_containerd_api_types_transfer_registry_proto_depIdxs = []int32{ + 2, // 0: containerd.types.transfer.OCIRegistry.resolver:type_name -> containerd.types.transfer.RegistryResolver + 5, // 1: containerd.types.transfer.RegistryResolver.headers:type_name -> containerd.types.transfer.RegistryResolver.HeadersEntry + 0, // 2: containerd.types.transfer.AuthResponse.authType:type_name -> containerd.types.transfer.AuthType + 6, // 3: containerd.types.transfer.AuthResponse.expire_at:type_name -> google.protobuf.Timestamp + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_types_transfer_registry_proto_init() } +func file_github_com_containerd_containerd_api_types_transfer_registry_proto_init() { + if File_github_com_containerd_containerd_api_types_transfer_registry_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*OCIRegistry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RegistryResolver); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuthRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AuthResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDesc, + NumEnums: 1, + NumMessages: 5, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_types_transfer_registry_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_types_transfer_registry_proto_depIdxs, + EnumInfos: 
file_github_com_containerd_containerd_api_types_transfer_registry_proto_enumTypes, + MessageInfos: file_github_com_containerd_containerd_api_types_transfer_registry_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_types_transfer_registry_proto = out.File + file_github_com_containerd_containerd_api_types_transfer_registry_proto_rawDesc = nil + file_github_com_containerd_containerd_api_types_transfer_registry_proto_goTypes = nil + file_github_com_containerd_containerd_api_types_transfer_registry_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/types/transfer/registry.proto b/vendor/github.com/containerd/containerd/api/types/transfer/registry.proto new file mode 100644 index 0000000000000..0b3ce68b4c4b5 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/transfer/registry.proto @@ -0,0 +1,79 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +syntax = "proto3"; + +package containerd.types.transfer; + +import "google/protobuf/timestamp.proto"; + +option go_package = "github.com/containerd/containerd/api/types/transfer"; + +message OCIRegistry { + string reference = 1; + RegistryResolver resolver = 2; +} + +message RegistryResolver { + // auth_stream is used to refer to a stream which auth callbacks may be + // made on. + string auth_stream = 1; + + // Headers + map headers = 2; + + // Allow custom hosts dir? + // Force skip verify + // Force HTTP + // CA callback? Client TLS callback? +} + +// AuthRequest is sent as a callback on a stream +message AuthRequest { + // host is the registry host + string host = 1; + + // reference is the namespace and repository name requested from the registry + string reference = 2; + + // wwwauthenticate is the HTTP WWW-Authenticate header values returned from the registry + repeated string wwwauthenticate = 3; +} + +enum AuthType { + NONE = 0; + + // CREDENTIALS is used to exchange username/password for access token + // using an oauth or "Docker Registry Token" server + CREDENTIALS = 1; + + // REFRESH is used to exchange secret for access token using an oauth + // or "Docker Registry Token" server + REFRESH = 2; + + // HEADER is used to set the HTTP Authorization header to secret + // directly for the registry. + // Value should be ` ` + HEADER = 3; +} + +message AuthResponse { + AuthType authType = 1; + string secret = 2; + string username = 3; + google.protobuf.Timestamp expire_at = 4; + // TODO: Stream error +} diff --git a/vendor/github.com/containerd/containerd/api/types/transfer/streaming.pb.go b/vendor/github.com/containerd/containerd/api/types/transfer/streaming.pb.go new file mode 100644 index 0000000000000..bdfa8d5d24125 --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/transfer/streaming.pb.go @@ -0,0 +1,226 @@ +// +//Copyright The containerd Authors. +// +//Licensed under the Apache License, Version 2.0 (the "License"); +//you may not use this file except in compliance with the License. 
+//You may obtain a copy of the License at +// +//http://www.apache.org/licenses/LICENSE-2.0 +// +//Unless required by applicable law or agreed to in writing, software +//distributed under the License is distributed on an "AS IS" BASIS, +//WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +//See the License for the specific language governing permissions and +//limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 +// source: github.com/containerd/containerd/api/types/transfer/streaming.proto + +package transfer + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Data struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *Data) Reset() { + *x = Data{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_transfer_streaming_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Data) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Data) ProtoMessage() {} + +func (x *Data) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_transfer_streaming_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Data.ProtoReflect.Descriptor instead. +func (*Data) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDescGZIP(), []int{0} +} + +func (x *Data) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +type WindowUpdate struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Update int32 `protobuf:"varint,1,opt,name=update,proto3" json:"update,omitempty"` +} + +func (x *WindowUpdate) Reset() { + *x = WindowUpdate{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_api_types_transfer_streaming_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WindowUpdate) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WindowUpdate) ProtoMessage() {} + +func (x *WindowUpdate) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_api_types_transfer_streaming_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WindowUpdate.ProtoReflect.Descriptor instead. 
+func (*WindowUpdate) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDescGZIP(), []int{1} +} + +func (x *WindowUpdate) GetUpdate() int32 { + if x != nil { + return x.Update + } + return 0 +} + +var File_github_com_containerd_containerd_api_types_transfer_streaming_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDesc = []byte{ + 0x0a, 0x43, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x72, 0x61, + 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, + 0x22, 0x1a, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x26, 0x0a, 0x0c, + 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, + 0x65, 0x73, 0x2f, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDescData = file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDesc +) + +func file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDescData) + }) + return file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDescData +} + +var file_github_com_containerd_containerd_api_types_transfer_streaming_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_github_com_containerd_containerd_api_types_transfer_streaming_proto_goTypes = []interface{}{ + (*Data)(nil), // 0: containerd.types.transfer.Data + (*WindowUpdate)(nil), // 1: containerd.types.transfer.WindowUpdate +} +var file_github_com_containerd_containerd_api_types_transfer_streaming_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_api_types_transfer_streaming_proto_init() } +func file_github_com_containerd_containerd_api_types_transfer_streaming_proto_init() { + if 
File_github_com_containerd_containerd_api_types_transfer_streaming_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_api_types_transfer_streaming_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Data); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_api_types_transfer_streaming_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WindowUpdate); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_api_types_transfer_streaming_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_api_types_transfer_streaming_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_api_types_transfer_streaming_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_api_types_transfer_streaming_proto = out.File + file_github_com_containerd_containerd_api_types_transfer_streaming_proto_rawDesc = nil + file_github_com_containerd_containerd_api_types_transfer_streaming_proto_goTypes = nil + file_github_com_containerd_containerd_api_types_transfer_streaming_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/api/types/transfer/streaming.proto b/vendor/github.com/containerd/containerd/api/types/transfer/streaming.proto new file mode 100644 index 0000000000000..234956c2c2c2f --- /dev/null +++ b/vendor/github.com/containerd/containerd/api/types/transfer/streaming.proto @@ -0,0 +1,29 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +syntax = "proto3"; + +package containerd.types.transfer; + +option go_package = "github.com/containerd/containerd/api/types/transfer"; + +message Data { + bytes data = 1; +} + +message WindowUpdate { + int32 update = 1; +} diff --git a/vendor/github.com/containerd/containerd/mount/subprocess_unsafe_linux.go b/vendor/github.com/containerd/containerd/archive/compression/compression_fuzzer.go similarity index 68% rename from vendor/github.com/containerd/containerd/mount/subprocess_unsafe_linux.go rename to vendor/github.com/containerd/containerd/archive/compression/compression_fuzzer.go index c7cb0c034343c..3516494ac0bdc 100644 --- a/vendor/github.com/containerd/containerd/mount/subprocess_unsafe_linux.go +++ b/vendor/github.com/containerd/containerd/archive/compression/compression_fuzzer.go @@ -1,3 +1,5 @@ +//go:build gofuzz + /* Copyright The containerd Authors. @@ -14,17 +16,13 @@ limitations under the License. 
*/ -package mount +package compression import ( - _ "unsafe" // required for go:linkname. + "bytes" ) -//go:linkname beforeFork syscall.runtime_BeforeFork -func beforeFork() - -//go:linkname afterFork syscall.runtime_AfterFork -func afterFork() - -//go:linkname afterForkInChild syscall.runtime_AfterForkInChild -func afterForkInChild() +func FuzzDecompressStream(data []byte) int { + _, _ = DecompressStream(bytes.NewReader(data)) + return 1 +} diff --git a/vendor/github.com/containerd/containerd/sys/userns_deprecated.go b/vendor/github.com/containerd/containerd/archive/link_default.go similarity index 68% rename from vendor/github.com/containerd/containerd/sys/userns_deprecated.go rename to vendor/github.com/containerd/containerd/archive/link_default.go index 53acf55477e36..84a8997eb510d 100644 --- a/vendor/github.com/containerd/containerd/sys/userns_deprecated.go +++ b/vendor/github.com/containerd/containerd/archive/link_default.go @@ -1,3 +1,5 @@ +//go:build !freebsd + /* Copyright The containerd Authors. @@ -14,10 +16,10 @@ limitations under the License. */ -package sys +package archive -import "github.com/containerd/containerd/pkg/userns" +import "os" -// RunningInUserNS detects whether we are currently running in a user namespace. -// Deprecated: use github.com/containerd/containerd/pkg/userns.RunningInUserNS instead. -var RunningInUserNS = userns.RunningInUserNS +func link(oldname, newname string) error { + return os.Link(oldname, newname) +} diff --git a/vendor/github.com/containerd/containerd/archive/link_freebsd.go b/vendor/github.com/containerd/containerd/archive/link_freebsd.go new file mode 100644 index 0000000000000..2097db06bc7d6 --- /dev/null +++ b/vendor/github.com/containerd/containerd/archive/link_freebsd.go @@ -0,0 +1,82 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package archive + +import ( + "os" + "syscall" + + "golang.org/x/sys/unix" +) + +func link(oldname, newname string) error { + e := ignoringEINTR(func() error { + return unix.Linkat(unix.AT_FDCWD, oldname, unix.AT_FDCWD, newname, 0) + }) + if e != nil { + return &os.LinkError{Op: "link", Old: oldname, New: newname, Err: e} + } + return nil +} + +// The following contents were copied from Go 1.18.2. +// Use of this source code is governed by the following +// BSD-style license: +// +// Copyright (c) 2009 The Go Authors. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// ignoringEINTR makes a function call and repeats it if it returns an +// EINTR error. This appears to be required even though we install all +// signal handlers with SA_RESTART: see #22838, #38033, #38836, #40846. +// Also #20400 and #36644 are issues in which a signal handler is +// installed without setting SA_RESTART. None of these are the common case, +// but there are enough of them that it seems that we can't avoid +// an EINTR loop. +func ignoringEINTR(fn func() error) error { + for { + err := fn() + if err != syscall.EINTR { + return err + } + } +} diff --git a/vendor/github.com/containerd/containerd/archive/tar.go b/vendor/github.com/containerd/containerd/archive/tar.go index 44b7949531335..32edd3b3d5071 100644 --- a/vendor/github.com/containerd/containerd/archive/tar.go +++ b/vendor/github.com/containerd/containerd/archive/tar.go @@ -24,13 +24,13 @@ import ( "io" "os" "path/filepath" - "runtime" "strings" "sync" "syscall" "time" "github.com/containerd/containerd/log" + "github.com/containerd/containerd/pkg/epoch" "github.com/containerd/containerd/pkg/userns" "github.com/containerd/continuity/fs" ) @@ -51,11 +51,11 @@ var errInvalidArchive = errors.New("invalid archive") // files will be prepended with the prefix ".wh.". This style is // based off AUFS whiteouts. // See https://github.com/opencontainers/image-spec/blob/main/layer.md -func Diff(ctx context.Context, a, b string) io.ReadCloser { +func Diff(ctx context.Context, a, b string, opts ...WriteDiffOpt) io.ReadCloser { r, w := io.Pipe() go func() { - err := WriteDiff(ctx, w, a, b) + err := WriteDiff(ctx, w, a, b, opts...) if err != nil { log.G(ctx).WithError(err).Debugf("write diff failed") } @@ -81,6 +81,10 @@ func WriteDiff(ctx context.Context, w io.Writer, a, b string, opts ...WriteDiffO return fmt.Errorf("failed to apply option: %w", err) } } + if tm := epoch.FromContext(ctx); tm != nil && options.SourceDateEpoch == nil { + options.SourceDateEpoch = tm + } + if options.writeDiffFunc == nil { options.writeDiffFunc = writeDiffNaive } @@ -95,8 +99,14 @@ func WriteDiff(ctx context.Context, w io.Writer, a, b string, opts ...WriteDiffO // files will be prepended with the prefix ".wh.". This style is // based off AUFS whiteouts. 
// See https://github.com/opencontainers/image-spec/blob/main/layer.md -func writeDiffNaive(ctx context.Context, w io.Writer, a, b string, _ WriteDiffOptions) error { - cw := NewChangeWriter(w, b) +func writeDiffNaive(ctx context.Context, w io.Writer, a, b string, o WriteDiffOptions) error { + var opts []ChangeWriterOpt + if o.SourceDateEpoch != nil { + opts = append(opts, + WithModTimeUpperBound(*o.SourceDateEpoch), + WithWhiteoutTime(*o.SourceDateEpoch)) + } + cw := NewChangeWriter(w, b, opts...) err := fs.Changes(ctx, a, b, cw.HandleChange) if err != nil { return fmt.Errorf("failed to create diff tar stream: %w", err) @@ -290,7 +300,7 @@ func applyNaive(ctx context.Context, root string, r io.Reader, options ApplyOpti srcData := io.Reader(tr) srcHdr := hdr - if err := createTarFile(ctx, path, root, srcHdr, srcData); err != nil { + if err := createTarFile(ctx, path, root, srcHdr, srcData, options.NoSameOwner); err != nil { return 0, err } @@ -315,7 +325,7 @@ func applyNaive(ctx context.Context, root string, r io.Reader, options ApplyOpti return size, nil } -func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header, reader io.Reader) error { +func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header, reader io.Reader, noSameOwner bool) error { // hdr.Mode is in linux format, which we can use for syscalls, // but for os.Foo() calls we need the mode converted to os.FileMode, // so use hdrInfo.Mode() (they differ for e.g. setuid bits) @@ -331,6 +341,7 @@ func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header } } + //nolint:staticcheck // TypeRegA is deprecated but we may still receive an external tar with TypeRegA case tar.TypeReg, tar.TypeRegA: file, err := openFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, hdrInfo.Mode()) if err != nil { @@ -363,7 +374,7 @@ func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header return err } - if err := os.Link(targetPath, path); err != nil { + if err := link(targetPath, path); err != nil { return err } @@ -380,8 +391,7 @@ func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) } - // Lchown is not supported on Windows. - if runtime.GOOS != "windows" { + if !noSameOwner { if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil { err = fmt.Errorf("failed to Lchown %q for UID %d, GID %d: %w", path, hdr.Uid, hdr.Gid, err) if errors.Is(err, syscall.EINVAL) && userns.RunningInUserNS() { @@ -491,26 +501,48 @@ func mkparent(ctx context.Context, path, root string, parents []string) error { // See also https://github.com/opencontainers/image-spec/blob/main/layer.md for details // about OCI layers type ChangeWriter struct { - tw *tar.Writer - source string - whiteoutT time.Time - inodeSrc map[uint64]string - inodeRefs map[uint64][]string - addedDirs map[string]struct{} + tw *tar.Writer + source string + modTimeUpperBound *time.Time + whiteoutT time.Time + inodeSrc map[uint64]string + inodeRefs map[uint64][]string + addedDirs map[string]struct{} +} + +// ChangeWriterOpt can be specified in NewChangeWriter. +type ChangeWriterOpt func(cw *ChangeWriter) + +// WithModTimeUpperBound sets the mod time upper bound. +func WithModTimeUpperBound(tm time.Time) ChangeWriterOpt { + return func(cw *ChangeWriter) { + cw.modTimeUpperBound = &tm + } +} + +// WithWhiteoutTime sets the whiteout timestamp. 
+func WithWhiteoutTime(tm time.Time) ChangeWriterOpt { + return func(cw *ChangeWriter) { + cw.whiteoutT = tm + } } // NewChangeWriter returns ChangeWriter that writes tar stream of the source directory // to the privided writer. Change information (add/modify/delete/unmodified) for each // file needs to be passed through HandleChange method. -func NewChangeWriter(w io.Writer, source string) *ChangeWriter { - return &ChangeWriter{ +func NewChangeWriter(w io.Writer, source string, opts ...ChangeWriterOpt) *ChangeWriter { + cw := &ChangeWriter{ tw: tar.NewWriter(w), source: source, - whiteoutT: time.Now(), + whiteoutT: time.Now(), // can be overridden with WithWhiteoutTime(time.Time) ChangeWriterOpt . inodeSrc: map[uint64]string{}, inodeRefs: map[uint64][]string{}, addedDirs: map[string]struct{}{}, } + for _, o := range opts { + o(cw) + } + return cw } // HandleChange receives filesystem change information and reflect that information to @@ -563,6 +595,9 @@ func (cw *ChangeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e // truncate timestamp for compatibility. without PAX stdlib rounds timestamps instead hdr.Format = tar.FormatPAX + if cw.modTimeUpperBound != nil && hdr.ModTime.After(*cw.modTimeUpperBound) { + hdr.ModTime = *cw.modTimeUpperBound + } hdr.ModTime = hdr.ModTime.Truncate(time.Second) hdr.AccessTime = time.Time{} hdr.ChangeTime = time.Time{} @@ -574,11 +609,9 @@ func (cw *ChangeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e return fmt.Errorf("failed to make path relative: %w", err) } } - name, err = tarName(name) - if err != nil { - return fmt.Errorf("cannot canonicalize path: %w", err) - } - // suffix with '/' for directories + // Canonicalize to POSIX-style paths using forward slashes. Directory + // entries must end with a slash. + name = filepath.ToSlash(name) if f.IsDir() && !strings.HasSuffix(name, "/") { name += "/" } diff --git a/vendor/github.com/containerd/containerd/archive/tar_mostunix.go b/vendor/github.com/containerd/containerd/archive/tar_mostunix.go index d2d970356c3cd..ca5e602a03119 100644 --- a/vendor/github.com/containerd/containerd/archive/tar_mostunix.go +++ b/vendor/github.com/containerd/containerd/archive/tar_mostunix.go @@ -1,5 +1,4 @@ //go:build !windows && !freebsd -// +build !windows,!freebsd /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/archive/tar_opts.go b/vendor/github.com/containerd/containerd/archive/tar_opts.go index 58985555a50a6..ac3a03c8d4a5b 100644 --- a/vendor/github.com/containerd/containerd/archive/tar_opts.go +++ b/vendor/github.com/containerd/containerd/archive/tar_opts.go @@ -20,6 +20,7 @@ import ( "archive/tar" "context" "io" + "time" ) // ApplyOptions provides additional options for an Apply operation @@ -27,6 +28,7 @@ type ApplyOptions struct { Filter Filter // Filter tar headers ConvertWhiteout ConvertWhiteout // Convert whiteout files Parents []string // Parent directories to handle inherited attributes without CoW + NoSameOwner bool // NoSameOwner will not attempt to preserve the owner specified in the tar archive. applyFunc func(context.Context, string, io.Reader, ApplyOptions) (int64, error) } @@ -61,6 +63,15 @@ func WithConvertWhiteout(c ConvertWhiteout) ApplyOpt { } } +// WithNoSameOwner is same as '--no-same-owner` in 'tar' command. +// It'll skip attempt to preserve the owner specified in the tar archive. 
+func WithNoSameOwner() ApplyOpt { + return func(options *ApplyOptions) error { + options.NoSameOwner = true + return nil + } +} + // WithParents provides parent directories for resolving inherited attributes // directory from the filesystem. // Inherited attributes are searched from first to last, making the first @@ -79,7 +90,22 @@ type WriteDiffOptions struct { ParentLayers []string // Windows needs the full list of parent layers writeDiffFunc func(context.Context, io.Writer, string, string, WriteDiffOptions) error + + // SourceDateEpoch specifies the following timestamps to provide control for reproducibility. + // - The upper bound timestamp of the diff contents + // - The timestamp of the whiteouts + // + // See also https://reproducible-builds.org/docs/source-date-epoch/ . + SourceDateEpoch *time.Time } // WriteDiffOpt allows setting mutable archive write properties on creation type WriteDiffOpt func(options *WriteDiffOptions) error + +// WithSourceDateEpoch specifies the SOURCE_DATE_EPOCH without touching the env vars. +func WithSourceDateEpoch(tm *time.Time) WriteDiffOpt { + return func(options *WriteDiffOptions) error { + options.SourceDateEpoch = tm + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/archive/tar_unix.go b/vendor/github.com/containerd/containerd/archive/tar_unix.go index 854afcf0adb4c..8e883eee62750 100644 --- a/vendor/github.com/containerd/containerd/archive/tar_unix.go +++ b/vendor/github.com/containerd/containerd/archive/tar_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows /* Copyright The containerd Authors. @@ -34,10 +33,6 @@ import ( "golang.org/x/sys/unix" ) -func tarName(p string) (string, error) { - return p, nil -} - func chmodTarEntry(perm os.FileMode) os.FileMode { return perm } @@ -62,8 +57,7 @@ func setHeaderForSpecialDevice(hdr *tar.Header, name string, fi os.FileInfo) err return errors.New("unsupported stat type") } - // Rdev is int32 on darwin/bsd, int64 on linux/solaris - rdev := uint64(s.Rdev) //nolint:unconvert + rdev := uint64(s.Rdev) //nolint:nolintlint,unconvert // rdev is int32 on darwin/bsd, int64 on linux/solaris // Currently go does not fill in the major/minors if s.Mode&syscall.S_IFBLK != 0 || diff --git a/vendor/github.com/containerd/containerd/archive/tar_windows.go b/vendor/github.com/containerd/containerd/archive/tar_windows.go index 4b71c1e307260..dc8f64ee41248 100644 --- a/vendor/github.com/containerd/containerd/archive/tar_windows.go +++ b/vendor/github.com/containerd/containerd/archive/tar_windows.go @@ -23,24 +23,9 @@ import ( "os" "strings" - "github.com/containerd/containerd/sys" + "github.com/moby/sys/sequential" ) -// tarName returns platform-specific filepath -// to canonical posix-style path for tar archival. p is relative -// path. -func tarName(p string) (string, error) { - // windows: convert windows style relative path with backslashes - // into forward slashes. Since windows does not allow '/' or '\' - // in file names, it is mostly safe to replace however we must - // check just in case - if strings.Contains(p, "/") { - return "", fmt.Errorf("windows path contains forward slash: %s", p) - } - - return strings.Replace(p, string(os.PathSeparator), "/", -1), nil -} - // chmodTarEntry is used to adjust the file permissions used in tar header based // on the platform the archival is done. 
func chmodTarEntry(perm os.FileMode) os.FileMode { @@ -57,15 +42,15 @@ func setHeaderForSpecialDevice(*tar.Header, string, os.FileInfo) error { } func open(p string) (*os.File, error) { - // We use sys.OpenSequential to ensure we use sequential file - // access on Windows to avoid depleting the standby list. - return sys.OpenSequential(p) + // We use sequential file access to avoid depleting the standby list on + // Windows. + return sequential.Open(p) } func openFile(name string, flag int, perm os.FileMode) (*os.File, error) { - // Source is regular file. We use sys.OpenFileSequential to use sequential - // file access to avoid depleting the standby list on Windows. - return sys.OpenFileSequential(name, flag, perm) + // Source is regular file. We use sequential file access to avoid depleting + // the standby list on Windows. + return sequential.OpenFile(name, flag, perm) } func mkdir(path string, perm os.FileMode) error { diff --git a/vendor/github.com/containerd/containerd/archive/time_unix.go b/vendor/github.com/containerd/containerd/archive/time_unix.go index 043e374538cea..b5a10d528fa15 100644 --- a/vendor/github.com/containerd/containerd/archive/time_unix.go +++ b/vendor/github.com/containerd/containerd/archive/time_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/archive/time_windows.go b/vendor/github.com/containerd/containerd/archive/time_windows.go index 71f397821a618..d906a3e5e7643 100644 --- a/vendor/github.com/containerd/containerd/archive/time_windows.go +++ b/vendor/github.com/containerd/containerd/archive/time_windows.go @@ -25,18 +25,17 @@ import ( // chtimes will set the create time on a file using the given modtime. // This requires calling SetFileTime and explicitly including the create time. func chtimes(path string, atime, mtime time.Time) error { - ctimespec := windows.NsecToTimespec(mtime.UnixNano()) - pathp, e := windows.UTF16PtrFromString(path) - if e != nil { - return e + pathp, err := windows.UTF16PtrFromString(path) + if err != nil { + return err } - h, e := windows.CreateFile(pathp, + h, err := windows.CreateFile(pathp, windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil, windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0) - if e != nil { - return e + if err != nil { + return err } defer windows.Close(h) - c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec)) + c := windows.NsecToFiletime(mtime.UnixNano()) return windows.SetFileTime(h, &c, nil, nil) } diff --git a/vendor/github.com/containerd/containerd/cio/io.go b/vendor/github.com/containerd/containerd/cio/io.go index 8ee13edda4a37..f85ec174d115c 100644 --- a/vendor/github.com/containerd/containerd/cio/io.go +++ b/vendor/github.com/containerd/containerd/cio/io.go @@ -18,7 +18,6 @@ package cio import ( "context" - "errors" "fmt" "io" "net/url" @@ -302,14 +301,21 @@ func LogFile(path string) Creator { // LogURIGenerator is the helper to generate log uri with specific scheme. func LogURIGenerator(scheme string, path string, args map[string]string) (*url.URL, error) { path = filepath.Clean(path) - if !strings.HasPrefix(path, "/") { - return nil, errors.New("absolute path needed") + if !filepath.IsAbs(path) { + return nil, fmt.Errorf("%q must be absolute", path) } - uri := &url.URL{ - Scheme: scheme, - Path: path, + // Without adding / here, C:\foo\bar.txt will become file://C:/foo/bar.txt + // which is invalid. The path must have three slashes. 
+ // + // https://learn.microsoft.com/en-us/archive/blogs/ie/file-uris-in-windows + // > In the case of a local Windows file path, there is no hostname, + // > and thus another slash and the path immediately follow. + p := filepath.ToSlash(path) + if !strings.HasPrefix(path, "/") { + p = "/" + p } + uri := &url.URL{Scheme: scheme, Path: p} if len(args) == 0 { return uri, nil diff --git a/vendor/github.com/containerd/containerd/cio/io_unix.go b/vendor/github.com/containerd/containerd/cio/io_unix.go index 5606cc88a9420..fddbe58c5f75c 100644 --- a/vendor/github.com/containerd/containerd/cio/io_unix.go +++ b/vendor/github.com/containerd/containerd/cio/io_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/client.go b/vendor/github.com/containerd/containerd/client.go index 1c2202e1ecdbd..a62217b961410 100644 --- a/vendor/github.com/containerd/containerd/client.go +++ b/vendor/github.com/containerd/containerd/client.go @@ -35,6 +35,7 @@ import ( introspectionapi "github.com/containerd/containerd/api/services/introspection/v1" leasesapi "github.com/containerd/containerd/api/services/leases/v1" namespacesapi "github.com/containerd/containerd/api/services/namespaces/v1" + sandboxsapi "github.com/containerd/containerd/api/services/sandbox/v1" snapshotsapi "github.com/containerd/containerd/api/services/snapshots/v1" "github.com/containerd/containerd/api/services/tasks/v1" versionservice "github.com/containerd/containerd/api/services/version/v1" @@ -52,15 +53,17 @@ import ( "github.com/containerd/containerd/pkg/dialer" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/plugin" + ptypes "github.com/containerd/containerd/protobuf/types" "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" + "github.com/containerd/containerd/sandbox" + sandboxproxy "github.com/containerd/containerd/sandbox/proxy" "github.com/containerd/containerd/services/introspection" "github.com/containerd/containerd/snapshots" snproxy "github.com/containerd/containerd/snapshots/proxy" - "github.com/containerd/typeurl" - ptypes "github.com/gogo/protobuf/types" + "github.com/containerd/typeurl/v2" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/runtime-spec/specs-go" "golang.org/x/sync/semaphore" "google.golang.org/grpc" "google.golang.org/grpc/backoff" @@ -185,6 +188,12 @@ func NewWithConn(conn *grpc.ClientConn, opts ...ClientOpt) (*Client, error) { runtime: fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS), } + if copts.defaultPlatform != nil { + c.platform = copts.defaultPlatform + } else { + c.platform = platforms.Default() + } + // check namespace labels for default runtime if copts.defaultRuntime == "" && c.defaultns != "" { if label, err := c.GetLabel(context.Background(), defaults.DefaultRuntimeNSLabel); err != nil { @@ -346,6 +355,8 @@ type RemoteContext struct { // ConvertSchema1 is whether to convert Docker registry schema 1 // manifests. If this option is false then any image which resolves // to schema 1 will return an error since schema 1 is not supported. + // + // Deprecated: use Schema 2 or OCI images. ConvertSchema1 bool // Platforms defines which platforms to handle when doing the image operation. 
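The LogURIGenerator change above is easier to follow with a standalone sketch. The snippet below is illustrative only and not part of the patch; toLogURI, the "binary" scheme, and the sample paths are made up. It mirrors the normalization the comment describes: clean the path, convert separators to forward slashes, and prepend "/" when it is missing, so a Windows drive path produces the valid three-slash form file:///C:/foo/bar.txt rather than file://C:/foo/bar.txt.

package main

import (
	"fmt"
	"net/url"
	"path/filepath"
	"strings"
)

// toLogURI is a hypothetical helper that mirrors the patched logic: the URL
// path must always begin with "/", which is what turns file://C:/... into
// the valid file:///C:/... form.
func toLogURI(scheme, path string) *url.URL {
	p := filepath.ToSlash(filepath.Clean(path))
	if !strings.HasPrefix(p, "/") {
		p = "/" + p // e.g. C:/foo/bar.txt -> /C:/foo/bar.txt
	}
	return &url.URL{Scheme: scheme, Path: p}
}

func main() {
	fmt.Println(toLogURI("binary", "/usr/bin/shim-logger")) // binary:///usr/bin/shim-logger
	fmt.Println(toLogURI("file", "C:/foo/bar.txt"))         // file:///C:/foo/bar.txt
}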
@@ -621,6 +632,11 @@ func (c *Client) SnapshotService(snapshotterName string) snapshots.Snapshotter { return snproxy.NewSnapshotter(snapshotsapi.NewSnapshotsClient(c.conn), snapshotterName) } +// DefaultNamespace return the default namespace +func (c *Client) DefaultNamespace() string { + return c.defaultns +} + // TaskService returns the underlying TasksClient func (c *Client) TaskService() tasks.TasksClient { if c.taskService != nil { @@ -688,6 +704,26 @@ func (c *Client) EventService() EventService { return NewEventServiceFromClient(eventsapi.NewEventsClient(c.conn)) } +// SandboxStore returns the underlying sandbox store client +func (c *Client) SandboxStore() sandbox.Store { + if c.sandboxStore != nil { + return c.sandboxStore + } + c.connMu.Lock() + defer c.connMu.Unlock() + return sandboxproxy.NewSandboxStore(sandboxsapi.NewStoreClient(c.conn)) +} + +// SandboxController returns the underlying sandbox controller client +func (c *Client) SandboxController() sandbox.Controller { + if c.sandboxController != nil { + return c.sandboxController + } + c.connMu.Lock() + defer c.connMu.Unlock() + return sandboxproxy.NewSandboxController(sandboxsapi.NewControllerClient(c.conn)) +} + // VersionService returns the underlying VersionClient func (c *Client) VersionService() versionservice.VersionClient { c.connMu.Lock() @@ -819,7 +855,7 @@ func (c *Client) GetSnapshotterSupportedPlatforms(ctx context.Context, snapshott return platforms.Any(snPlatforms...), nil } -func toPlatforms(pt []apitypes.Platform) []ocispec.Platform { +func toPlatforms(pt []*apitypes.Platform) []ocispec.Platform { platforms := make([]ocispec.Platform, len(pt)) for i, p := range pt { platforms[i] = ocispec.Platform{ @@ -830,3 +866,21 @@ func toPlatforms(pt []apitypes.Platform) []ocispec.Platform { } return platforms } + +// GetSnapshotterCapabilities returns the capabilities of a snapshotter. +func (c *Client) GetSnapshotterCapabilities(ctx context.Context, snapshotterName string) ([]string, error) { + filters := []string{fmt.Sprintf("type==%s, id==%s", plugin.SnapshotPlugin, snapshotterName)} + in := c.IntrospectionService() + + resp, err := in.Plugins(ctx, filters) + if err != nil { + return nil, err + } + + if len(resp.Plugins) <= 0 { + return nil, fmt.Errorf("inspection service could not find snapshotter %s plugin", snapshotterName) + } + + sn := resp.Plugins[0] + return sn.Capabilities, nil +} diff --git a/vendor/github.com/containerd/containerd/client_opts.go b/vendor/github.com/containerd/containerd/client_opts.go index 2ef7575d8a6c6..4e0a78a8a31de 100644 --- a/vendor/github.com/containerd/containerd/client_opts.go +++ b/vendor/github.com/containerd/containerd/client_opts.go @@ -200,6 +200,8 @@ func WithChildLabelMap(fn func(ocispec.Descriptor) []string) RemoteOpt { // WithSchema1Conversion is used to convert Docker registry schema 1 // manifests to oci manifests on pull. Without this option schema 1 // manifests will return a not supported error. +// +// Deprecated: use Schema 2 or OCI images. 
func WithSchema1Conversion(client *Client, c *RemoteContext) error { c.ConvertSchema1 = true return nil diff --git a/vendor/github.com/containerd/containerd/container.go b/vendor/github.com/containerd/containerd/container.go index 7d8d674c89da8..2890bfdc4344c 100644 --- a/vendor/github.com/containerd/containerd/container.go +++ b/vendor/github.com/containerd/containerd/container.go @@ -32,10 +32,10 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/oci" + "github.com/containerd/containerd/protobuf" "github.com/containerd/containerd/runtime/v2/runc/options" "github.com/containerd/fifo" - "github.com/containerd/typeurl" - prototypes "github.com/gogo/protobuf/types" + "github.com/containerd/typeurl/v2" ver "github.com/opencontainers/image-spec/specs-go" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/opencontainers/selinux/go-selinux/label" @@ -74,7 +74,7 @@ type Container interface { // SetLabels sets the provided labels for the container and returns the final label set SetLabels(context.Context, map[string]string) (map[string]string, error) // Extensions returns the extensions set on the container - Extensions(context.Context) (map[string]prototypes.Any, error) + Extensions(context.Context) (map[string]typeurl.Any, error) // Update a container Update(context.Context, ...UpdateContainerOpts) error // Checkpoint creates a checkpoint image of the current container @@ -120,7 +120,7 @@ func (c *container) Info(ctx context.Context, opts ...InfoOpts) (containers.Cont return c.metadata, nil } -func (c *container) Extensions(ctx context.Context) (map[string]prototypes.Any, error) { +func (c *container) Extensions(ctx context.Context) (map[string]typeurl.Any, error) { r, err := c.get(ctx) if err != nil { return nil, err @@ -163,7 +163,7 @@ func (c *container) Spec(ctx context.Context) (*oci.Spec, error) { return nil, err } var s oci.Spec - if err := json.Unmarshal(r.Spec.Value, &s); err != nil { + if err := json.Unmarshal(r.Spec.GetValue(), &s); err != nil { return nil, err } return &s, nil @@ -258,6 +258,7 @@ func (c *container) NewTask(ctx context.Context, ioCreate cio.Creator, opts ...N request.Rootfs = append(request.Rootfs, &types.Mount{ Type: m.Type, Source: m.Source, + Target: m.Target, Options: m.Options, }) } @@ -275,6 +276,7 @@ func (c *container) NewTask(ctx context.Context, ioCreate cio.Creator, opts ...N request.Rootfs = append(request.Rootfs, &types.Mount{ Type: m.Type, Source: m.Source, + Target: m.Target, Options: m.Options, }) } @@ -284,7 +286,7 @@ func (c *container) NewTask(ctx context.Context, ioCreate cio.Creator, opts ...N if err != nil { return nil, err } - request.Options = any + request.Options = protobuf.FromAny(any) } t := &task{ client: c.client, @@ -396,7 +398,7 @@ func (c *container) loadTask(ctx context.Context, ioAttach cio.Attach) (Task, er return nil, err } var i cio.IO - if ioAttach != nil && response.Process.Status != tasktypes.StatusUnknown { + if ioAttach != nil && response.Process.Status != tasktypes.Status_UNKNOWN { // Do not attach IO for task in unknown state, because there // are no fifo paths anyway. 
if i, err = attachExistingIO(response, ioAttach); err != nil { diff --git a/vendor/github.com/containerd/containerd/container_checkpoint_opts.go b/vendor/github.com/containerd/containerd/container_checkpoint_opts.go index a64ef618ba62b..64f23823d2804 100644 --- a/vendor/github.com/containerd/containerd/container_checkpoint_opts.go +++ b/vendor/github.com/containerd/containerd/container_checkpoint_opts.go @@ -28,9 +28,11 @@ import ( "github.com/containerd/containerd/diff" "github.com/containerd/containerd/images" "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/protobuf" + "github.com/containerd/containerd/protobuf/proto" "github.com/containerd/containerd/rootfs" "github.com/containerd/containerd/runtime/v2/runc/options" - "github.com/containerd/typeurl" + "github.com/opencontainers/go-digest" imagespec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -56,7 +58,7 @@ func WithCheckpointImage(ctx context.Context, client *Client, c *containers.Cont // WithCheckpointTask includes the running task func WithCheckpointTask(ctx context.Context, client *Client, c *containers.Container, index *imagespec.Index, copts *options.CheckpointOptions) error { - any, err := typeurl.MarshalAny(copts) + any, err := protobuf.MarshalAnyToProto(copts) if err != nil { return nil } @@ -71,14 +73,14 @@ func WithCheckpointTask(ctx context.Context, client *Client, c *containers.Conta platformSpec := platforms.DefaultSpec() index.Manifests = append(index.Manifests, imagespec.Descriptor{ MediaType: d.MediaType, - Size: d.Size_, - Digest: d.Digest, + Size: d.Size, + Digest: digest.Digest(d.Digest), Platform: &platformSpec, Annotations: d.Annotations, }) } // save copts - data, err := any.Marshal() + data, err := proto.Marshal(any) if err != nil { return err } @@ -97,8 +99,9 @@ func WithCheckpointTask(ctx context.Context, client *Client, c *containers.Conta // WithCheckpointRuntime includes the container runtime info func WithCheckpointRuntime(ctx context.Context, client *Client, c *containers.Container, index *imagespec.Index, copts *options.CheckpointOptions) error { - if c.Runtime.Options != nil { - data, err := c.Runtime.Options.Marshal() + if c.Runtime.Options != nil && c.Runtime.Options.GetValue() != nil { + any := protobuf.FromAny(c.Runtime.Options) + data, err := proto.Marshal(any) if err != nil { return err } diff --git a/vendor/github.com/containerd/containerd/container_opts.go b/vendor/github.com/containerd/containerd/container_opts.go index 4d630ea6c9d2e..4a937032f5661 100644 --- a/vendor/github.com/containerd/containerd/container_opts.go +++ b/vendor/github.com/containerd/containerd/container_opts.go @@ -26,10 +26,11 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" + "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/oci" + "github.com/containerd/containerd/protobuf" "github.com/containerd/containerd/snapshots" - "github.com/containerd/typeurl" - "github.com/gogo/protobuf/types" + "github.com/containerd/typeurl/v2" "github.com/opencontainers/image-spec/identity" v1 "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -57,7 +58,7 @@ type InfoConfig struct { func WithRuntime(name string, options interface{}) NewContainerOpts { return func(ctx context.Context, client *Client, c *containers.Container) error { var ( - any *types.Any + any typeurl.Any err error ) if options != nil { @@ -74,6 +75,15 @@ func WithRuntime(name string, options interface{}) 
NewContainerOpts { } } +// WithSandbox joins the container to a container group (aka sandbox) from the given ID +// Note: shim runtime must support sandboxes environments. +func WithSandbox(sandboxID string) NewContainerOpts { + return func(ctx context.Context, client *Client, c *containers.Container) error { + c.SandboxID = sandboxID + return nil + } +} + // WithImage sets the provided image as the base for the container func WithImage(i Image) NewContainerOpts { return func(ctx context.Context, client *Client, c *containers.Container) error { @@ -214,6 +224,11 @@ func WithNewSnapshot(id string, i Image, opts ...snapshots.Opt) NewContainerOpts if err != nil { return err } + + parent, err = resolveSnapshotOptions(ctx, client, c.Snapshotter, s, parent, opts...) + if err != nil { + return err + } if _, err := s.Prepare(ctx, id, parent, opts...); err != nil { return err } @@ -258,6 +273,11 @@ func WithNewSnapshotView(id string, i Image, opts ...snapshots.Opt) NewContainer if err != nil { return err } + + parent, err = resolveSnapshotOptions(ctx, client, c.Snapshotter, s, parent, opts...) + if err != nil { + return err + } if _, err := s.View(ctx, id, parent, opts...); err != nil { return err } @@ -288,9 +308,9 @@ func WithContainerExtension(name string, extension interface{}) NewContainerOpts } if c.Extensions == nil { - c.Extensions = make(map[string]types.Any) + c.Extensions = make(map[string]typeurl.Any) } - c.Extensions[name] = *any + c.Extensions[name] = any return nil } } @@ -298,6 +318,9 @@ func WithContainerExtension(name string, extension interface{}) NewContainerOpts // WithNewSpec generates a new spec for a new container func WithNewSpec(opts ...oci.SpecOpts) NewContainerOpts { return func(ctx context.Context, client *Client, c *containers.Container) error { + if _, ok := namespaces.Namespace(ctx); !ok { + ctx = namespaces.WithNamespace(ctx, client.DefaultNamespace()) + } s, err := oci.GenerateSpec(ctx, client, c, opts...) if err != nil { return err @@ -315,7 +338,7 @@ func WithSpec(s *oci.Spec, opts ...oci.SpecOpts) NewContainerOpts { } var err error - c.Spec, err = typeurl.MarshalAny(s) + c.Spec, err = protobuf.MarshalAnyToProto(s) return err } } diff --git a/vendor/github.com/containerd/containerd/container_opts_unix.go b/vendor/github.com/containerd/containerd/container_opts_unix.go index b6fc37db92711..016c1a9258d93 100644 --- a/vendor/github.com/containerd/containerd/container_opts_unix.go +++ b/vendor/github.com/containerd/containerd/container_opts_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows /* Copyright The containerd Authors. 
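The container_opts.go hunks above introduce WithSandbox and make WithNewSpec fall back to the client's default namespace during spec generation. The following is a minimal usage sketch, not taken from this patch: it assumes a reachable containerd socket, an image that was already pulled, an existing sandbox, and a shim that supports sandboxed environments. The socket path, image reference, container ID "demo", snapshot name "demo-snapshot", and sandbox ID "sandbox-1" are placeholders.

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/containerd/oci"
)

func main() {
	// Connect to containerd; the socket path is a placeholder.
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "default")

	// Assumes the image was pulled beforehand.
	img, err := client.GetImage(ctx, "docker.io/library/alpine:latest")
	if err != nil {
		log.Fatal(err)
	}

	// WithSandbox records the sandbox ID on the container metadata so the
	// container joins an existing sandbox; "sandbox-1" is assumed to exist.
	// WithNewSpec defaults the namespace used for spec generation to
	// client.DefaultNamespace() when the context carries none.
	ctr, err := client.NewContainer(ctx, "demo",
		containerd.WithSandbox("sandbox-1"),
		containerd.WithNewSnapshot("demo-snapshot", img),
		containerd.WithNewSpec(oci.WithImageConfig(img)),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer ctr.Delete(ctx, containerd.WithSnapshotCleanup)

	log.Printf("created container %s in sandbox sandbox-1", ctr.ID())
}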
diff --git a/vendor/github.com/containerd/containerd/container_restore_opts.go b/vendor/github.com/containerd/containerd/container_restore_opts.go index bdc8650cda261..2afc18701386b 100644 --- a/vendor/github.com/containerd/containerd/container_restore_opts.go +++ b/vendor/github.com/containerd/containerd/container_restore_opts.go @@ -24,8 +24,8 @@ import ( "github.com/containerd/containerd/containers" "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" - "github.com/gogo/protobuf/proto" - ptypes "github.com/gogo/protobuf/types" + "github.com/containerd/containerd/protobuf/proto" + ptypes "github.com/containerd/containerd/protobuf/types" "github.com/opencontainers/image-spec/identity" imagespec "github.com/opencontainers/image-spec/specs-go/v1" ) diff --git a/vendor/github.com/containerd/containerd/containerd.service b/vendor/github.com/containerd/containerd/containerd.service index e4c082b3a4e73..38a3459456540 100644 --- a/vendor/github.com/containerd/containerd/containerd.service +++ b/vendor/github.com/containerd/containerd/containerd.service @@ -18,6 +18,8 @@ Documentation=https://containerd.io After=network.target local-fs.target [Service] +#uncomment to enable the experimental sbservice (sandboxed) version of containerd/cri integration +#Environment="ENABLE_CRI_SANDBOXES=sandboxed" ExecStartPre=-/sbin/modprobe overlay ExecStart=/usr/local/bin/containerd diff --git a/vendor/github.com/containerd/containerd/containers/containers.go b/vendor/github.com/containerd/containerd/containers/containers.go index 7174bbd6aa6fb..49f389133649d 100644 --- a/vendor/github.com/containerd/containerd/containers/containers.go +++ b/vendor/github.com/containerd/containerd/containers/containers.go @@ -20,7 +20,7 @@ import ( "context" "time" - "github.com/gogo/protobuf/types" + "github.com/containerd/typeurl/v2" ) // Container represents the set of data pinned by a container. Unless otherwise @@ -53,7 +53,7 @@ type Container struct { // container. // // This field is required but mutable. - Spec *types.Any + Spec typeurl.Any // SnapshotKey specifies the snapshot key to use for the container's root // filesystem. When starting a task from this container, a caller should @@ -75,13 +75,18 @@ type Container struct { UpdatedAt time.Time // Extensions stores client-specified metadata - Extensions map[string]types.Any + Extensions map[string]typeurl.Any + + // SandboxID is an identifier of sandbox this container belongs to. + // + // This property is optional, but can't be changed after creation. 
+ SandboxID string } // RuntimeInfo holds runtime specific information type RuntimeInfo struct { Name string - Options *types.Any + Options typeurl.Any } // Store interacts with the underlying container storage diff --git a/vendor/github.com/containerd/containerd/containerstore.go b/vendor/github.com/containerd/containerd/containerstore.go index 2756e2a68b9e1..331a6f41de04e 100644 --- a/vendor/github.com/containerd/containerd/containerstore.go +++ b/vendor/github.com/containerd/containerd/containerstore.go @@ -24,7 +24,9 @@ import ( containersapi "github.com/containerd/containerd/api/services/containers/v1" "github.com/containerd/containerd/containers" "github.com/containerd/containerd/errdefs" - ptypes "github.com/gogo/protobuf/types" + "github.com/containerd/containerd/protobuf" + ptypes "github.com/containerd/containerd/protobuf/types" + "github.com/containerd/typeurl/v2" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -50,7 +52,7 @@ func (r *remoteContainers) Get(ctx context.Context, id string) (containers.Conta return containers.Container{}, errdefs.FromGRPC(err) } - return containerFromProto(&resp.Container), nil + return containerFromProto(resp.Container), nil } func (r *remoteContainers) List(ctx context.Context, filters ...string) ([]containers.Container, error) { @@ -114,7 +116,7 @@ func (r *remoteContainers) Create(ctx context.Context, container containers.Cont return containers.Container{}, errdefs.FromGRPC(err) } - return containerFromProto(&created.Container), nil + return containerFromProto(created.Container), nil } @@ -134,7 +136,7 @@ func (r *remoteContainers) Update(ctx context.Context, container containers.Cont return containers.Container{}, errdefs.FromGRPC(err) } - return containerFromProto(&updated.Container), nil + return containerFromProto(updated.Container), nil } @@ -147,19 +149,24 @@ func (r *remoteContainers) Delete(ctx context.Context, id string) error { } -func containerToProto(container *containers.Container) containersapi.Container { - return containersapi.Container{ +func containerToProto(container *containers.Container) *containersapi.Container { + extensions := make(map[string]*ptypes.Any) + for k, v := range container.Extensions { + extensions[k] = protobuf.FromAny(v) + } + return &containersapi.Container{ ID: container.ID, Labels: container.Labels, Image: container.Image, Runtime: &containersapi.Container_Runtime{ Name: container.Runtime.Name, - Options: container.Runtime.Options, + Options: protobuf.FromAny(container.Runtime.Options), }, - Spec: container.Spec, + Spec: protobuf.FromAny(container.Spec), Snapshotter: container.Snapshotter, SnapshotKey: container.SnapshotKey, - Extensions: container.Extensions, + Extensions: extensions, + Sandbox: container.SandboxID, } } @@ -171,6 +178,11 @@ func containerFromProto(containerpb *containersapi.Container) containers.Contain Options: containerpb.Runtime.Options, } } + extensions := make(map[string]typeurl.Any) + for k, v := range containerpb.Extensions { + v := v + extensions[k] = v + } return containers.Container{ ID: containerpb.ID, Labels: containerpb.Labels, @@ -179,17 +191,19 @@ func containerFromProto(containerpb *containersapi.Container) containers.Contain Spec: containerpb.Spec, Snapshotter: containerpb.Snapshotter, SnapshotKey: containerpb.SnapshotKey, - CreatedAt: containerpb.CreatedAt, - UpdatedAt: containerpb.UpdatedAt, - Extensions: containerpb.Extensions, + CreatedAt: protobuf.FromTimestamp(containerpb.CreatedAt), + UpdatedAt: protobuf.FromTimestamp(containerpb.UpdatedAt), + 
Extensions: extensions, + SandboxID: containerpb.Sandbox, } } -func containersFromProto(containerspb []containersapi.Container) []containers.Container { +func containersFromProto(containerspb []*containersapi.Container) []containers.Container { var containers []containers.Container for _, container := range containerspb { - containers = append(containers, containerFromProto(&container)) + container := container + containers = append(containers, containerFromProto(container)) } return containers diff --git a/vendor/github.com/containerd/containerd/content/content.go b/vendor/github.com/containerd/containerd/content/content.go index ff17a8417b39b..b7230d88b6e18 100644 --- a/vendor/github.com/containerd/containerd/content/content.go +++ b/vendor/github.com/containerd/containerd/content/content.go @@ -25,6 +25,26 @@ import ( ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) +// Store combines the methods of content-oriented interfaces into a set that +// are commonly provided by complete implementations. +// +// Overall content lifecycle: +// - Ingester is used to initiate a write operation (aka ingestion) +// - IngestManager is used to manage (e.g. list, abort) active ingestions +// - Once an ingestion is complete (see Writer.Commit), Provider is used to +// query a single piece of content by its digest +// - Manager is used to manage (e.g. list, delete) previously committed content +// +// Note that until ingestion is complete, its content is not visible through +// Provider or Manager. Once ingestion is complete, it is no longer exposed +// through IngestManager. +type Store interface { + Manager + Provider + IngestManager + Ingester +} + // ReaderAt extends the standard io.ReaderAt interface with reporting of Size and io.Closer type ReaderAt interface { io.ReaderAt @@ -42,10 +62,30 @@ type Provider interface { // Ingester writes content type Ingester interface { - // Some implementations require WithRef to be included in opts. + // Writer initiates a writing operation (aka ingestion). A single ingestion + // is uniquely identified by its ref, provided using a WithRef option. + // Writer can be called multiple times with the same ref to access the same + // ingestion. + // Once all the data is written, use Writer.Commit to complete the ingestion. Writer(ctx context.Context, opts ...WriterOpt) (Writer, error) } +// IngestManager provides methods for managing ingestions. An ingestion is a +// not-yet-complete writing operation initiated using Ingester and identified +// by a ref string. +type IngestManager interface { + // Status returns the status of the provided ref. + Status(ctx context.Context, ref string) (Status, error) + + // ListStatuses returns the status of any active ingestions whose ref match + // the provided regular expression. If empty, all active ingestions will be + // returned. + ListStatuses(ctx context.Context, filters ...string) ([]Status, error) + + // Abort completely cancels the ingest operation targeted by ref. + Abort(ctx context.Context, ref string) error +} + // Info holds content specific information // // TODO(stevvooe): Consider a very different name for this struct. Info is way @@ -58,7 +98,7 @@ type Info struct { Labels map[string]string } -// Status of a content operation +// Status of a content operation (i.e. an ingestion) type Status struct { Ref string Offset int64 @@ -94,21 +134,7 @@ type Manager interface { Delete(ctx context.Context, dgst digest.Digest) error } -// IngestManager provides methods for managing ingests. 
-type IngestManager interface { - // Status returns the status of the provided ref. - Status(ctx context.Context, ref string) (Status, error) - - // ListStatuses returns the status of any active ingestions whose ref match the - // provided regular expression. If empty, all active ingestions will be - // returned. - ListStatuses(ctx context.Context, filters ...string) ([]Status, error) - - // Abort completely cancels the ingest operation targeted by ref. - Abort(ctx context.Context, ref string) error -} - -// Writer handles the write of content into a content store +// Writer handles writing of content into a content store type Writer interface { // Close closes the writer, if the writer has not been // committed this allows resuming or aborting. @@ -131,15 +157,6 @@ type Writer interface { Truncate(size int64) error } -// Store combines the methods of content-oriented interfaces into a set that -// are commonly provided by complete implementations. -type Store interface { - Manager - Provider - IngestManager - Ingester -} - // Opt is used to alter the mutable properties of content type Opt func(*Info) error diff --git a/vendor/github.com/containerd/containerd/content/helpers.go b/vendor/github.com/containerd/containerd/content/helpers.go index 723c313917123..5404109a6db91 100644 --- a/vendor/github.com/containerd/containerd/content/helpers.go +++ b/vendor/github.com/containerd/containerd/content/helpers.go @@ -21,12 +21,12 @@ import ( "errors" "fmt" "io" - "math/rand" "sync" "time" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" + "github.com/containerd/containerd/pkg/randutil" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -43,16 +43,26 @@ var bufPool = sync.Pool{ }, } +type reader interface { + Reader() io.Reader +} + // NewReader returns a io.Reader from a ReaderAt func NewReader(ra ReaderAt) io.Reader { - rd := io.NewSectionReader(ra, 0, ra.Size()) - return rd + if rd, ok := ra.(reader); ok { + return rd.Reader() + } + return io.NewSectionReader(ra, 0, ra.Size()) } // ReadBlob retrieves the entire contents of the blob from the provider. // // Avoid using this for large blobs, such as layers. func ReadBlob(ctx context.Context, provider Provider, desc ocispec.Descriptor) ([]byte, error) { + if int64(len(desc.Data)) == desc.Size && digest.FromBytes(desc.Data) == desc.Digest { + return desc.Data, nil + } + ra, err := provider.ReaderAt(ctx, desc) if err != nil { return nil, err @@ -113,7 +123,7 @@ func OpenWriter(ctx context.Context, cs Ingester, opts ...WriterOpt) (Writer, er // error or abort. Requires asserting for an ingest manager select { - case <-time.After(time.Millisecond * time.Duration(rand.Intn(retry))): + case <-time.After(time.Millisecond * time.Duration(randutil.Intn(retry))): if retry < 2048 { retry = retry << 1 } diff --git a/vendor/github.com/containerd/containerd/content/local/content_local_fuzzer.go b/vendor/github.com/containerd/containerd/content/local/content_local_fuzzer.go new file mode 100644 index 0000000000000..a523f28d91148 --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/local/content_local_fuzzer.go @@ -0,0 +1,76 @@ +//go:build gofuzz + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package local + +import ( + "bufio" + "bytes" + "context" + _ "crypto/sha256" + "io" + "testing" + + "github.com/opencontainers/go-digest" + + "github.com/containerd/containerd/content" +) + +func FuzzContentStoreWriter(data []byte) int { + t := &testing.T{} + ctx := context.Background() + ctx, _, cs, cleanup := contentStoreEnv(t) + defer cleanup() + + cw, err := cs.Writer(ctx, content.WithRef("myref")) + if err != nil { + return 0 + } + if err := cw.Close(); err != nil { + return 0 + } + + // reopen, so we can test things + cw, err = cs.Writer(ctx, content.WithRef("myref")) + if err != nil { + return 0 + } + + err = checkCopyFuzz(int64(len(data)), cw, bufio.NewReader(io.NopCloser(bytes.NewReader(data)))) + if err != nil { + return 0 + } + expected := digest.FromBytes(data) + + if err = cw.Commit(ctx, int64(len(data)), expected); err != nil { + return 0 + } + return 1 +} + +func checkCopyFuzz(size int64, dst io.Writer, src io.Reader) error { + nn, err := io.Copy(dst, src) + if err != nil { + return err + } + + if nn != size { + return err + } + return nil +} diff --git a/vendor/github.com/containerd/containerd/content/local/readerat.go b/vendor/github.com/containerd/containerd/content/local/readerat.go index a83c171bbd2bf..899e85c0ba870 100644 --- a/vendor/github.com/containerd/containerd/content/local/readerat.go +++ b/vendor/github.com/containerd/containerd/content/local/readerat.go @@ -18,6 +18,7 @@ package local import ( "fmt" + "io" "os" "github.com/containerd/containerd/content" @@ -65,3 +66,7 @@ func (ra sizeReaderAt) Size() int64 { func (ra sizeReaderAt) Close() error { return ra.fp.Close() } + +func (ra sizeReaderAt) Reader() io.Reader { + return io.LimitReader(ra.fp, ra.size) +} diff --git a/vendor/github.com/containerd/containerd/content/local/store.go b/vendor/github.com/containerd/containerd/content/local/store.go index f41a92d04a586..baae3565bb47f 100644 --- a/vendor/github.com/containerd/containerd/content/local/store.go +++ b/vendor/github.com/containerd/containerd/content/local/store.go @@ -20,7 +20,6 @@ import ( "context" "fmt" "io" - "math/rand" "os" "path/filepath" "strconv" @@ -32,9 +31,10 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/filters" "github.com/containerd/containerd/log" + "github.com/containerd/containerd/pkg/randutil" "github.com/sirupsen/logrus" - digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -262,7 +262,7 @@ func (s *store) Walk(ctx context.Context, fn content.WalkFunc, fs ...string) err return nil } - dgst := digest.NewDigestFromHex(alg.String(), filepath.Base(path)) + dgst := digest.NewDigestFromEncoded(alg, filepath.Base(path)) if err := dgst.Validate(); err != nil { // log error but don't report log.L.WithError(err).WithField("path", path).Error("invalid digest for blob path") @@ -473,7 +473,7 @@ func (s *store) Writer(ctx context.Context, opts ...content.WriterOpt) (content. 
lockErr = nil break } - time.Sleep(time.Millisecond * time.Duration(rand.Intn(1<= 0; i-- { processor, ok := handlers[i](ctx, stream.MediaType()) @@ -71,7 +71,7 @@ func StaticHandler(expectedMediaType string, fn StreamProcessorInit) Handler { } // StreamProcessorInit returns the initialized stream processor -type StreamProcessorInit func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) +type StreamProcessorInit func(ctx context.Context, stream StreamProcessor, payloads map[string]typeurl.Any) (StreamProcessor, error) // RawProcessor provides access to direct fd for processing type RawProcessor interface { @@ -93,7 +93,7 @@ func compressedHandler(ctx context.Context, mediaType string) (StreamProcessorIn return nil, false } if compressed != "" { - return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) { + return func(ctx context.Context, stream StreamProcessor, payloads map[string]typeurl.Any) (StreamProcessor, error) { ds, err := compression.DecompressStream(stream) if err != nil { return nil, err @@ -104,7 +104,7 @@ func compressedHandler(ctx context.Context, mediaType string) (StreamProcessorIn }, nil }, true } - return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) { + return func(ctx context.Context, stream StreamProcessor, payloads map[string]typeurl.Any) (StreamProcessor, error) { return &stdProcessor{ rc: stream, }, nil @@ -179,7 +179,7 @@ func BinaryHandler(id, returnsMediaType string, mediaTypes []string, path string } return func(_ context.Context, mediaType string) (StreamProcessorInit, bool) { if _, ok := set[mediaType]; ok { - return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) { + return func(ctx context.Context, stream StreamProcessor, payloads map[string]typeurl.Any) (StreamProcessor, error) { payload := payloads[id] return NewBinaryProcessor(ctx, mediaType, returnsMediaType, stream, path, args, env, payload) }, true diff --git a/vendor/github.com/containerd/containerd/diff/stream_unix.go b/vendor/github.com/containerd/containerd/diff/stream_unix.go index 6622c331eea9c..82fc31b402d39 100644 --- a/vendor/github.com/containerd/containerd/diff/stream_unix.go +++ b/vendor/github.com/containerd/containerd/diff/stream_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows /* Copyright The containerd Authors. @@ -28,20 +27,22 @@ import ( "os" "sync" - "github.com/gogo/protobuf/proto" - "github.com/gogo/protobuf/types" + "github.com/containerd/containerd/protobuf" + "github.com/containerd/containerd/protobuf/proto" + "github.com/containerd/typeurl/v2" exec "golang.org/x/sys/execabs" ) // NewBinaryProcessor returns a binary processor for use with processing content streams -func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProcessor, name string, args, env []string, payload *types.Any) (StreamProcessor, error) { +func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProcessor, name string, args, env []string, payload typeurl.Any) (StreamProcessor, error) { cmd := exec.CommandContext(ctx, name, args...) cmd.Env = os.Environ() cmd.Env = append(cmd.Env, env...) 
var payloadC io.Closer if payload != nil { - data, err := proto.Marshal(payload) + pb := protobuf.FromAny(payload) + data, err := proto.Marshal(pb) if err != nil { return nil, err } @@ -87,6 +88,7 @@ func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProce r: r, mt: rmt, stderr: stderr, + done: make(chan struct{}), } go p.wait() @@ -109,6 +111,11 @@ type binaryProcessor struct { mu sync.Mutex err error + + // There is a race condition between waiting on c.cmd.Wait() and setting c.err within + // c.wait(), and reading that value from c.Err(). + // Use done to wait for the returned error to be captured and set. + done chan struct{} } func (c *binaryProcessor) Err() error { @@ -125,6 +132,16 @@ func (c *binaryProcessor) wait() { c.mu.Unlock() } } + close(c.done) +} + +func (c *binaryProcessor) Wait(ctx context.Context) error { + select { + case <-c.done: + return c.Err() + case <-ctx.Done(): + return ctx.Err() + } } func (c *binaryProcessor) File() *os.File { diff --git a/vendor/github.com/containerd/containerd/diff/stream_windows.go b/vendor/github.com/containerd/containerd/diff/stream_windows.go index c0bf03b94ab32..e6026f4e98ad0 100644 --- a/vendor/github.com/containerd/containerd/diff/stream_windows.go +++ b/vendor/github.com/containerd/containerd/diff/stream_windows.go @@ -27,8 +27,9 @@ import ( "sync" winio "github.com/Microsoft/go-winio" - "github.com/gogo/protobuf/proto" - "github.com/gogo/protobuf/types" + "github.com/containerd/containerd/protobuf" + "github.com/containerd/containerd/protobuf/proto" + "github.com/containerd/typeurl/v2" "github.com/sirupsen/logrus" exec "golang.org/x/sys/execabs" ) @@ -36,13 +37,14 @@ import ( const processorPipe = "STREAM_PROCESSOR_PIPE" // NewBinaryProcessor returns a binary processor for use with processing content streams -func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProcessor, name string, args, env []string, payload *types.Any) (StreamProcessor, error) { +func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProcessor, name string, args, env []string, payload typeurl.Any) (StreamProcessor, error) { cmd := exec.CommandContext(ctx, name, args...) cmd.Env = os.Environ() cmd.Env = append(cmd.Env, env...) if payload != nil { - data, err := proto.Marshal(payload) + pb := protobuf.FromAny(payload) + data, err := proto.Marshal(pb) if err != nil { return nil, err } @@ -96,6 +98,7 @@ func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProce r: r, mt: rmt, stderr: stderr, + done: make(chan struct{}), } go p.wait() @@ -115,6 +118,11 @@ type binaryProcessor struct { mu sync.Mutex err error + + // There is a race condition between waiting on c.cmd.Wait() and setting c.err within + // c.wait(), and reading that value from c.Err(). + // Use done to wait for the returned error to be captured and set. 
+ done chan struct{} } func (c *binaryProcessor) Err() error { @@ -131,6 +139,16 @@ func (c *binaryProcessor) wait() { c.mu.Unlock() } } + close(c.done) +} + +func (c *binaryProcessor) Wait(ctx context.Context) error { + select { + case <-c.done: + return c.Err() + case <-ctx.Done(): + return ctx.Err() + } } func (c *binaryProcessor) File() *os.File { diff --git a/vendor/github.com/containerd/containerd/diff/walking/differ.go b/vendor/github.com/containerd/containerd/diff/walking/differ.go index a24c72273c52d..784d411395454 100644 --- a/vendor/github.com/containerd/containerd/diff/walking/differ.go +++ b/vendor/github.com/containerd/containerd/diff/walking/differ.go @@ -18,11 +18,11 @@ package walking import ( "context" + "crypto/rand" "encoding/base64" "errors" "fmt" "io" - "math/rand" "time" "github.com/containerd/containerd/archive" @@ -30,8 +30,10 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/diff" "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/labels" "github.com/containerd/containerd/log" "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/pkg/epoch" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -41,7 +43,6 @@ type walkingDiff struct { } var emptyDesc = ocispec.Descriptor{} -var uncompressed = "containerd.io/uncompressed" // NewWalkingDiff is a generic implementation of diff.Comparer. The diff is // calculated by mounting both the upper and lower mount sets and walking the @@ -64,6 +65,14 @@ func (s *walkingDiff) Compare(ctx context.Context, lower, upper []mount.Mount, o return emptyDesc, err } } + if tm := epoch.FromContext(ctx); tm != nil && config.SourceDateEpoch == nil { + config.SourceDateEpoch = tm + } + + var writeDiffOpts []archive.WriteDiffOpt + if config.SourceDateEpoch != nil { + writeDiffOpts = append(writeDiffOpts, archive.WithSourceDateEpoch(config.SourceDateEpoch)) + } var isCompressed bool if config.Compressor != nil { @@ -136,7 +145,7 @@ func (s *walkingDiff) Compare(ctx context.Context, lower, upper []mount.Mount, o return fmt.Errorf("failed to get compressed stream: %w", errOpen) } } - errOpen = archive.WriteDiff(ctx, io.MultiWriter(compressed, dgstr.Hash()), lowerRoot, upperRoot) + errOpen = archive.WriteDiff(ctx, io.MultiWriter(compressed, dgstr.Hash()), lowerRoot, upperRoot, writeDiffOpts...) 
compressed.Close() if errOpen != nil { return fmt.Errorf("failed to write compressed diff: %w", errOpen) @@ -145,9 +154,9 @@ func (s *walkingDiff) Compare(ctx context.Context, lower, upper []mount.Mount, o if config.Labels == nil { config.Labels = map[string]string{} } - config.Labels[uncompressed] = dgstr.Digest().String() + config.Labels[labels.LabelUncompressed] = dgstr.Digest().String() } else { - if errOpen = archive.WriteDiff(ctx, cw, lowerRoot, upperRoot); errOpen != nil { + if errOpen = archive.WriteDiff(ctx, cw, lowerRoot, upperRoot, writeDiffOpts...); errOpen != nil { return fmt.Errorf("failed to write diff: %w", errOpen) } } @@ -172,10 +181,10 @@ func (s *walkingDiff) Compare(ctx context.Context, lower, upper []mount.Mount, o if info.Labels == nil { info.Labels = make(map[string]string) } - // Set uncompressed label if digest already existed without label - if _, ok := info.Labels[uncompressed]; !ok { - info.Labels[uncompressed] = config.Labels[uncompressed] - if _, err := s.store.Update(ctx, info, "labels."+uncompressed); err != nil { + // Set "containerd.io/uncompressed" label if digest already existed without label + if _, ok := info.Labels[labels.LabelUncompressed]; !ok { + info.Labels[labels.LabelUncompressed] = config.Labels[labels.LabelUncompressed] + if _, err := s.store.Update(ctx, info, "labels."+labels.LabelUncompressed); err != nil { return fmt.Errorf("error setting uncompressed label: %w", err) } } diff --git a/vendor/github.com/containerd/containerd/events.go b/vendor/github.com/containerd/containerd/events.go index 3577b7c3a9fc0..32d2dfc3158e4 100644 --- a/vendor/github.com/containerd/containerd/events.go +++ b/vendor/github.com/containerd/containerd/events.go @@ -22,7 +22,8 @@ import ( eventsapi "github.com/containerd/containerd/api/services/events/v1" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/events" - "github.com/containerd/typeurl" + "github.com/containerd/containerd/protobuf" + "github.com/containerd/typeurl/v2" ) // EventService handles the publish, forward and subscribe of events. 
@@ -51,7 +52,7 @@ func (e *eventRemote) Publish(ctx context.Context, topic string, event events.Ev } req := &eventsapi.PublishRequest{ Topic: topic, - Event: any, + Event: protobuf.FromAny(any), } if _, err := e.client.Publish(ctx, req); err != nil { return errdefs.FromGRPC(err) @@ -62,10 +63,10 @@ func (e *eventRemote) Publish(ctx context.Context, topic string, event events.Ev func (e *eventRemote) Forward(ctx context.Context, envelope *events.Envelope) error { req := &eventsapi.ForwardRequest{ Envelope: &eventsapi.Envelope{ - Timestamp: envelope.Timestamp, + Timestamp: protobuf.ToTimestamp(envelope.Timestamp), Namespace: envelope.Namespace, Topic: envelope.Topic, - Event: envelope.Event, + Event: protobuf.FromAny(envelope.Event), }, } if _, err := e.client.Forward(ctx, req); err != nil { @@ -104,7 +105,7 @@ func (e *eventRemote) Subscribe(ctx context.Context, filters ...string) (ch <-ch select { case evq <- &events.Envelope{ - Timestamp: ev.Timestamp, + Timestamp: protobuf.FromTimestamp(ev.Timestamp), Namespace: ev.Namespace, Topic: ev.Topic, Event: ev.Event, diff --git a/vendor/github.com/containerd/containerd/events/events.go b/vendor/github.com/containerd/containerd/events/events.go index b7eb86f1eb654..ff5642301f40b 100644 --- a/vendor/github.com/containerd/containerd/events/events.go +++ b/vendor/github.com/containerd/containerd/events/events.go @@ -20,8 +20,7 @@ import ( "context" "time" - "github.com/containerd/typeurl" - "github.com/gogo/protobuf/types" + "github.com/containerd/typeurl/v2" ) // Envelope provides the packaging for an event. @@ -29,7 +28,7 @@ type Envelope struct { Timestamp time.Time Namespace string Topic string - Event *types.Any + Event typeurl.Any } // Field returns the value for the given fieldpath as a string, if defined. 
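Aside (not part of the patch): the two hunks above switch the event envelope from gogo's *types.Any to the transport-agnostic typeurl.Any, with protobuf.FromAny/ToTimestamp doing the conversion at the gRPC boundary. A minimal sketch of that flow, assuming the containerd 1.7-era api/events, protobuf, and typeurl/v2 packages shown in this diff; the TaskExit values are illustrative only.

// Illustrative sketch, not vendored code: wrap an event in the new
// typeurl.Any-based Envelope and convert it for a gRPC request.
package main

import (
	"fmt"
	"time"

	eventstypes "github.com/containerd/containerd/api/events"
	"github.com/containerd/containerd/events"
	"github.com/containerd/containerd/protobuf"
	"github.com/containerd/typeurl/v2"
)

func main() {
	// Marshal a concrete event into a typeurl.Any (illustrative values).
	evt := &eventstypes.TaskExit{ContainerID: "example", Pid: 1234, ExitStatus: 0}
	anyEvt, err := typeurl.MarshalAny(evt)
	if err != nil {
		panic(err)
	}

	// Envelope.Event is now typeurl.Any instead of gogo's *types.Any.
	env := events.Envelope{
		Timestamp: time.Now(),
		Namespace: "default",
		Topic:     "/tasks/exit",
		Event:     anyEvt,
	}

	// At the gRPC boundary the helpers convert back to concrete protobuf types,
	// mirroring protobuf.FromAny and protobuf.ToTimestamp in the hunks above.
	pbAny := protobuf.FromAny(env.Event)        // *anypb.Any
	pbTS := protobuf.ToTimestamp(env.Timestamp) // *timestamppb.Timestamp
	fmt.Println(pbAny.GetTypeUrl(), pbTS.AsTime())
}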
diff --git a/vendor/github.com/containerd/containerd/events/exchange/exchange.go b/vendor/github.com/containerd/containerd/events/exchange/exchange.go index a1f385d7abd3d..e18377c11ae6d 100644 --- a/vendor/github.com/containerd/containerd/events/exchange/exchange.go +++ b/vendor/github.com/containerd/containerd/events/exchange/exchange.go @@ -28,10 +28,8 @@ import ( "github.com/containerd/containerd/identifiers" "github.com/containerd/containerd/log" "github.com/containerd/containerd/namespaces" - "github.com/containerd/typeurl" + "github.com/containerd/typeurl/v2" goevents "github.com/docker/go-events" - "github.com/gogo/protobuf/types" - "github.com/sirupsen/logrus" ) // Exchange broadcasts events @@ -60,10 +58,10 @@ func (e *Exchange) Forward(ctx context.Context, envelope *events.Envelope) (err } defer func() { - logger := log.G(ctx).WithFields(logrus.Fields{ + logger := log.G(ctx).WithFields(log.Fields{ "topic": envelope.Topic, "ns": envelope.Namespace, - "type": envelope.Event.TypeUrl, + "type": envelope.Event.GetTypeUrl(), }) if err != nil { @@ -82,7 +80,6 @@ func (e *Exchange) Forward(ctx context.Context, envelope *events.Envelope) (err func (e *Exchange) Publish(ctx context.Context, topic string, event events.Event) (err error) { var ( namespace string - encoded *types.Any envelope events.Envelope ) @@ -94,7 +91,7 @@ func (e *Exchange) Publish(ctx context.Context, topic string, event events.Event return fmt.Errorf("envelope topic %q: %w", topic, err) } - encoded, err = typeurl.MarshalAny(event) + encoded, err := typeurl.MarshalAny(event) if err != nil { return err } @@ -105,10 +102,10 @@ func (e *Exchange) Publish(ctx context.Context, topic string, event events.Event envelope.Event = encoded defer func() { - logger := log.G(ctx).WithFields(logrus.Fields{ + logger := log.G(ctx).WithFields(log.Fields{ "topic": envelope.Topic, "ns": envelope.Namespace, - "type": envelope.Event.TypeUrl, + "type": envelope.Event.GetTypeUrl(), }) if err != nil { diff --git a/vendor/github.com/containerd/containerd/image.go b/vendor/github.com/containerd/containerd/image.go index 784df5dd951d7..40dc3ff6cb73c 100644 --- a/vendor/github.com/containerd/containerd/image.go +++ b/vendor/github.com/containerd/containerd/image.go @@ -28,6 +28,7 @@ import ( "github.com/containerd/containerd/diff" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" + "github.com/containerd/containerd/labels" "github.com/containerd/containerd/pkg/kmutex" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/rootfs" @@ -64,6 +65,8 @@ type Image interface { Metadata() images.Image // Platform returns the platform match comparer. Can be nil. Platform() platforms.MatchComparer + // Spec returns the OCI image spec for a given image. 
+ Spec(ctx context.Context) (ocispec.Image, error) } type usageOptions struct { @@ -279,6 +282,26 @@ func (i *image) IsUnpacked(ctx context.Context, snapshotterName string) (bool, e return false, nil } +func (i *image) Spec(ctx context.Context) (ocispec.Image, error) { + var ociImage ocispec.Image + + desc, err := i.Config(ctx) + if err != nil { + return ociImage, fmt.Errorf("get image config descriptor: %w", err) + } + + blob, err := content.ReadBlob(ctx, i.ContentStore(), desc) + if err != nil { + return ociImage, fmt.Errorf("read image config from content store: %w", err) + } + + if err := json.Unmarshal(blob, &ociImage); err != nil { + return ociImage, fmt.Errorf("unmarshal image config %s: %w", blob, err) + } + + return ociImage, nil +} + // UnpackConfig provides configuration for the unpack of an image type UnpackConfig struct { // ApplyOpts for applying a diff to a snapshotter @@ -370,10 +393,10 @@ func (i *image) Unpack(ctx context.Context, snapshotterName string, opts ...Unpa cinfo := content.Info{ Digest: layer.Blob.Digest, Labels: map[string]string{ - "containerd.io/uncompressed": layer.Diff.Digest.String(), + labels.LabelUncompressed: layer.Diff.Digest.String(), }, } - if _, err := cs.Update(ctx, cinfo, "labels.containerd.io/uncompressed"); err != nil { + if _, err := cs.Update(ctx, cinfo, "labels."+labels.LabelUncompressed); err != nil { return err } } diff --git a/vendor/github.com/containerd/containerd/image_store.go b/vendor/github.com/containerd/containerd/image_store.go index fd79e8929f528..524a7a6727a4d 100644 --- a/vendor/github.com/containerd/containerd/image_store.go +++ b/vendor/github.com/containerd/containerd/image_store.go @@ -23,8 +23,12 @@ import ( "github.com/containerd/containerd/api/types" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" - ptypes "github.com/gogo/protobuf/types" + "github.com/containerd/containerd/pkg/epoch" + "github.com/containerd/containerd/protobuf" + ptypes "github.com/containerd/containerd/protobuf/types" + "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "google.golang.org/protobuf/types/known/timestamppb" ) type remoteImages struct { @@ -61,14 +65,18 @@ func (s *remoteImages) List(ctx context.Context, filters ...string) ([]images.Im } func (s *remoteImages) Create(ctx context.Context, image images.Image) (images.Image, error) { - created, err := s.client.Create(ctx, &imagesapi.CreateImageRequest{ + req := &imagesapi.CreateImageRequest{ Image: imageToProto(&image), - }) + } + if tm := epoch.FromContext(ctx); tm != nil { + req.SourceDateEpoch = timestamppb.New(*tm) + } + created, err := s.client.Create(ctx, req) if err != nil { return images.Image{}, errdefs.FromGRPC(err) } - return imageFromProto(&created.Image), nil + return imageFromProto(created.Image), nil } func (s *remoteImages) Update(ctx context.Context, image images.Image, fieldpaths ...string) (images.Image, error) { @@ -78,16 +86,19 @@ func (s *remoteImages) Update(ctx context.Context, image images.Image, fieldpath Paths: fieldpaths, } } - - updated, err := s.client.Update(ctx, &imagesapi.UpdateImageRequest{ + req := &imagesapi.UpdateImageRequest{ Image: imageToProto(&image), UpdateMask: updateMask, - }) + } + if tm := epoch.FromContext(ctx); tm != nil { + req.SourceDateEpoch = timestamppb.New(*tm) + } + updated, err := s.client.Update(ctx, req) if err != nil { return images.Image{}, errdefs.FromGRPC(err) } - return imageFromProto(&updated.Image), nil + return imageFromProto(updated.Image), 
nil } func (s *remoteImages) Delete(ctx context.Context, name string, opts ...images.DeleteOpt) error { @@ -105,13 +116,13 @@ func (s *remoteImages) Delete(ctx context.Context, name string, opts ...images.D return errdefs.FromGRPC(err) } -func imageToProto(image *images.Image) imagesapi.Image { - return imagesapi.Image{ +func imageToProto(image *images.Image) *imagesapi.Image { + return &imagesapi.Image{ Name: image.Name, Labels: image.Labels, Target: descToProto(&image.Target), - CreatedAt: image.CreatedAt, - UpdatedAt: image.UpdatedAt, + CreatedAt: protobuf.ToTimestamp(image.CreatedAt), + UpdatedAt: protobuf.ToTimestamp(image.UpdatedAt), } } @@ -119,17 +130,18 @@ func imageFromProto(imagepb *imagesapi.Image) images.Image { return images.Image{ Name: imagepb.Name, Labels: imagepb.Labels, - Target: descFromProto(&imagepb.Target), - CreatedAt: imagepb.CreatedAt, - UpdatedAt: imagepb.UpdatedAt, + Target: descFromProto(imagepb.Target), + CreatedAt: protobuf.FromTimestamp(imagepb.CreatedAt), + UpdatedAt: protobuf.FromTimestamp(imagepb.UpdatedAt), } } -func imagesFromProto(imagespb []imagesapi.Image) []images.Image { +func imagesFromProto(imagespb []*imagesapi.Image) []images.Image { var images []images.Image for _, image := range imagespb { - images = append(images, imageFromProto(&image)) + image := image + images = append(images, imageFromProto(image)) } return images @@ -138,17 +150,17 @@ func imagesFromProto(imagespb []imagesapi.Image) []images.Image { func descFromProto(desc *types.Descriptor) ocispec.Descriptor { return ocispec.Descriptor{ MediaType: desc.MediaType, - Size: desc.Size_, - Digest: desc.Digest, + Size: desc.Size, + Digest: digest.Digest(desc.Digest), Annotations: desc.Annotations, } } -func descToProto(desc *ocispec.Descriptor) types.Descriptor { - return types.Descriptor{ +func descToProto(desc *ocispec.Descriptor) *types.Descriptor { + return &types.Descriptor{ MediaType: desc.MediaType, - Size_: desc.Size, - Digest: desc.Digest, + Size: desc.Size, + Digest: desc.Digest.String(), Annotations: desc.Annotations, } } diff --git a/vendor/github.com/containerd/containerd/images/archive/exporter.go b/vendor/github.com/containerd/containerd/images/archive/exporter.go index 40a0a33df06ea..87858a958ba90 100644 --- a/vendor/github.com/containerd/containerd/images/archive/exporter.go +++ b/vendor/github.com/containerd/containerd/images/archive/exporter.go @@ -89,6 +89,18 @@ func WithImage(is images.Store, name string) ExportOpt { } } +// WithImages adds multiples images to the exported archive. +func WithImages(imgs []images.Image) ExportOpt { + return func(ctx context.Context, o *exportOptions) error { + for _, img := range imgs { + img.Target.Annotations = addNameAnnotation(img.Name, img.Target.Annotations) + o.manifests = append(o.manifests, img.Target) + } + + return nil + } +} + // WithManifest adds a manifest to the exported archive. // When names are given they will be set on the manifest in the // exported archive, creating an index record for each name. 
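Aside (not part of the patch): the exporter change above adds WithImages, which takes a pre-resolved slice of images.Image rather than looking each name up in an images.Store as WithImage does. A minimal usage sketch under that assumption; the exportImages helper name and the WithPlatform restriction are illustrative, not from the diff.

// Illustrative sketch, not vendored code: export several pre-resolved images
// into a single archive with the new WithImages option.
package export

import (
	"context"
	"io"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/images/archive"
	"github.com/containerd/containerd/platforms"
)

// exportImages is a hypothetical helper; archive.Export and the options used
// here are the exported API of the vendored package.
func exportImages(ctx context.Context, store content.Provider, imgs []images.Image, w io.Writer) error {
	return archive.Export(ctx, store, w,
		archive.WithImages(imgs),                        // option added by this change
		archive.WithPlatform(platforms.DefaultStrict()), // restrict to the default platform
	)
}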
diff --git a/vendor/github.com/containerd/containerd/images/archive/importer.go b/vendor/github.com/containerd/containerd/images/archive/importer.go index c1c802fb55c70..3ca88091cfdd6 100644 --- a/vendor/github.com/containerd/containerd/images/archive/importer.go +++ b/vendor/github.com/containerd/containerd/images/archive/importer.go @@ -31,6 +31,7 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" + "github.com/containerd/containerd/labels" "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" digest "github.com/opencontainers/go-digest" @@ -94,6 +95,7 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opt symlinks[hdr.Name] = path.Join(path.Dir(hdr.Name), hdr.Linkname) } + //nolint:staticcheck // TypeRegA is deprecated but we may still receive an external tar with TypeRegA if hdr.Typeflag != tar.TypeReg && hdr.Typeflag != tar.TypeRegA { if hdr.Typeflag != tar.TypeDir { log.G(ctx).WithField("file", hdr.Name).Debug("file type ignored") @@ -263,11 +265,11 @@ func resolveLayers(ctx context.Context, store content.Store, layerFiles []string } layers[i] = desc descs[desc.Digest] = &layers[i] - filters = append(filters, "labels.\"containerd.io/uncompressed\"=="+desc.Digest.String()) + filters = append(filters, fmt.Sprintf("labels.\"%s\"==%s", labels.LabelUncompressed, desc.Digest.String())) } err := store.Walk(ctx, func(info content.Info) error { - dgst, ok := info.Labels["containerd.io/uncompressed"] + dgst, ok := info.Labels[labels.LabelUncompressed] if ok { desc := descs[digest.Digest(dgst)] if desc != nil { @@ -307,7 +309,7 @@ func resolveLayers(ctx context.Context, store content.Store, layerFiles []string } ref := fmt.Sprintf("compress-blob-%s-%s", desc.Digest.Algorithm().String(), desc.Digest.Encoded()) labels := map[string]string{ - "containerd.io/uncompressed": desc.Digest.String(), + labels.LabelUncompressed: desc.Digest.String(), } layers[i], err = compressBlob(ctx, store, s, ref, content.WithLabels(labels)) if err != nil { diff --git a/vendor/github.com/containerd/containerd/images/archive/reference.go b/vendor/github.com/containerd/containerd/images/archive/reference.go index ba19b111f11c0..8a030fbfa5b42 100644 --- a/vendor/github.com/containerd/containerd/images/archive/reference.go +++ b/vendor/github.com/containerd/containerd/images/archive/reference.go @@ -41,6 +41,9 @@ func AddRefPrefix(image string) func(string) string { // a full reference. func refTranslator(image string, checkPrefix bool) func(string) string { return func(ref string) string { + if image == "" { + return "" + } // Check if ref is full reference if strings.ContainsAny(ref, "/:@") { // If not prefixed, don't include image diff --git a/vendor/github.com/containerd/containerd/images/converter/default.go b/vendor/github.com/containerd/containerd/images/converter/default.go index f4e944bc54b73..c67617e4ccfc4 100644 --- a/vendor/github.com/containerd/containerd/images/converter/default.go +++ b/vendor/github.com/containerd/containerd/images/converter/default.go @@ -132,7 +132,7 @@ func copyDesc(desc ocispec.Descriptor) *ocispec.Descriptor { return &descCopy } -// convertLayer converts image image layers if c.layerConvertFunc is set. +// convertLayer converts image layers if c.layerConvertFunc is set. // // c.layerConvertFunc can be nil, e.g., for converting Docker media types to OCI ones. 
func (c *defaultConverter) convertLayer(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) { @@ -410,6 +410,7 @@ func writeJSON(ctx context.Context, cs content.Store, x interface{}, oldDesc oci return nil, err } if err := content.Copy(ctx, w, bytes.NewReader(b), int64(len(b)), dgst, content.WithLabels(labels)); err != nil { + w.Close() return nil, err } if err := w.Close(); err != nil { diff --git a/vendor/github.com/containerd/containerd/images/image.go b/vendor/github.com/containerd/containerd/images/image.go index d45afe482c900..2d2e36a9a3053 100644 --- a/vendor/github.com/containerd/containerd/images/image.go +++ b/vendor/github.com/containerd/containerd/images/image.go @@ -138,7 +138,7 @@ type platformManifest struct { // TODO(stevvooe): This violates the current platform agnostic approach to this // package by returning a specific manifest type. We'll need to refactor this // to return a manifest descriptor or decide that we want to bring the API in -// this direction because this abstraction is not needed.` +// this direction because this abstraction is not needed. func Manifest(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Manifest, error) { var ( limit = 1 @@ -311,7 +311,7 @@ func Check(ctx context.Context, provider content.Provider, image ocispec.Descrip return false, nil, nil, nil, fmt.Errorf("failed to check image %v: %w", image.Digest, err) } - // TODO(stevvooe): It is possible that referenced conponents could have + // TODO(stevvooe): It is possible that referenced components could have // children, but this is rare. For now, we ignore this and only verify // that manifest components are present. required = append([]ocispec.Descriptor{mfst.Config}, mfst.Layers...) 
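Aside (not part of the patch): the client-side image.go hunk earlier in this diff adds a Spec method that reads and unmarshals the OCI image config from the content store. A hedged usage sketch assuming a running containerd at the default socket; the address and image reference are placeholders.

// Illustrative sketch, not vendored code: read the OCI image config via the
// new Image.Spec method on the containerd client.
package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock") // placeholder address
	if err != nil {
		panic(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "default")
	img, err := client.GetImage(ctx, "docker.io/library/alpine:latest") // placeholder ref
	if err != nil {
		panic(err)
	}

	// Spec resolves the config descriptor, reads the blob, and unmarshals it.
	spec, err := img.Spec(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(spec.Architecture, spec.OS, spec.Config.Env)
}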
diff --git a/vendor/github.com/containerd/containerd/images/mediatypes.go b/vendor/github.com/containerd/containerd/images/mediatypes.go index 671e160e15d2e..067963babba16 100644 --- a/vendor/github.com/containerd/containerd/images/mediatypes.go +++ b/vendor/github.com/containerd/containerd/images/mediatypes.go @@ -38,7 +38,9 @@ const ( MediaTypeDockerSchema2Config = "application/vnd.docker.container.image.v1+json" MediaTypeDockerSchema2Manifest = "application/vnd.docker.distribution.manifest.v2+json" MediaTypeDockerSchema2ManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" + // Checkpoint/Restore Media Types + MediaTypeContainerd1Checkpoint = "application/vnd.containerd.container.criu.checkpoint.criu.tar" MediaTypeContainerd1CheckpointPreDump = "application/vnd.containerd.container.criu.checkpoint.predump.tar" MediaTypeContainerd1Resource = "application/vnd.containerd.container.resource.tar" @@ -47,9 +49,12 @@ const ( MediaTypeContainerd1CheckpointOptions = "application/vnd.containerd.container.checkpoint.options.v1+proto" MediaTypeContainerd1CheckpointRuntimeName = "application/vnd.containerd.container.checkpoint.runtime.name" MediaTypeContainerd1CheckpointRuntimeOptions = "application/vnd.containerd.container.checkpoint.runtime.options+proto" - // Legacy Docker schema1 manifest + + // MediaTypeDockerSchema1Manifest is the legacy Docker schema1 manifest MediaTypeDockerSchema1Manifest = "application/vnd.docker.distribution.manifest.v1+prettyjws" - // Encypted media types + + // Encrypted media types + MediaTypeImageLayerEncrypted = ocispec.MediaTypeImageLayer + "+encrypted" MediaTypeImageLayerGzipEncrypted = ocispec.MediaTypeImageLayerGzip + "+encrypted" ) @@ -93,16 +98,23 @@ func DiffCompression(ctx context.Context, mediaType string) (string, error) { // parseMediaTypes splits the media type into the base type and // an array of sorted extensions -func parseMediaTypes(mt string) (string, []string) { +func parseMediaTypes(mt string) (mediaType string, suffixes []string) { if mt == "" { return "", []string{} } + mediaType, ext, ok := strings.Cut(mt, "+") + if !ok { + return mediaType, []string{} + } - s := strings.Split(mt, "+") - ext := s[1:] - sort.Strings(ext) - - return s[0], ext + // Splitting the extensions following the mediatype "(+)gzip+encrypted". + // We expect this to be a limited list, so add an arbitrary limit (50). + // + // Note that DiffCompression is only using the last element, so perhaps we + // should split on the last "+" only. + suffixes = strings.SplitN(ext, "+", 50) + sort.Strings(suffixes) + return mediaType, suffixes } // IsNonDistributable returns true if the media type is non-distributable. 
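Aside (not part of the patch): parseMediaTypes is unexported, so the following is a standalone re-implementation of the new strings.Cut/SplitN logic purely to show what base type and sorted suffix list come out of a compressed, encrypted layer media type.

// Illustrative sketch, not vendored code: mirrors the reworked
// parseMediaTypes split shown in the hunk above.
package main

import (
	"fmt"
	"sort"
	"strings"
)

func splitMediaType(mt string) (string, []string) {
	base, ext, ok := strings.Cut(mt, "+")
	if !ok {
		return base, []string{}
	}
	suffixes := strings.SplitN(ext, "+", 50) // arbitrary cap, as in the vendored code
	sort.Strings(suffixes)
	return base, suffixes
}

func main() {
	base, suffixes := splitMediaType("application/vnd.oci.image.layer.v1.tar+gzip+encrypted")
	fmt.Println(base)     // application/vnd.oci.image.layer.v1.tar
	fmt.Println(suffixes) // [encrypted gzip]
}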
@@ -118,8 +130,7 @@ func IsLayerType(mt string) bool { } // Parse Docker media types, strip off any + suffixes first - base, _ := parseMediaTypes(mt) - switch base { + switch base, _ := parseMediaTypes(mt); base { case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip, MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip: return true diff --git a/vendor/github.com/containerd/containerd/import.go b/vendor/github.com/containerd/containerd/import.go index 8936d88effd6e..ca55920dc492a 100644 --- a/vendor/github.com/containerd/containerd/import.go +++ b/vendor/github.com/containerd/containerd/import.go @@ -38,6 +38,7 @@ type importOpts struct { allPlatforms bool platformMatcher platforms.MatchComparer compress bool + discardLayers bool } // ImportOpt allows the caller to specify import specific options @@ -105,6 +106,15 @@ func WithImportCompression() ImportOpt { } } +// WithDiscardUnpackedLayers allows the garbage collector to clean up +// layers from content store after unpacking. +func WithDiscardUnpackedLayers() ImportOpt { + return func(c *importOpts) error { + c.discardLayers = true + return nil + } +} + // Import imports an image from a Tar stream using reader. // Caller needs to specify importer. Future version may use oci.v1 as the default. // Note that unreferenced blobs may be imported to the content store as well. @@ -195,7 +205,11 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt } handler = images.FilterPlatforms(handler, platformMatcher) - handler = images.SetChildrenLabels(cs, handler) + if iopts.discardLayers { + handler = images.SetChildrenMappedLabels(cs, handler, images.ChildGCLabelsFilterLayers) + } else { + handler = images.SetChildrenLabels(cs, handler) + } if err := images.WalkNotEmpty(ctx, handler, index); err != nil { return nil, err } diff --git a/vendor/github.com/containerd/containerd/install.go b/vendor/github.com/containerd/containerd/install.go index 16cff08d24abe..c3ea823844c63 100644 --- a/vendor/github.com/containerd/containerd/install.go +++ b/vendor/github.com/containerd/containerd/install.go @@ -70,7 +70,8 @@ func (c *Client) Install(ctx context.Context, image Image, opts ...InstallOpts) ra.Close() return err } - if _, err := archive.Apply(ctx, path, r, archive.WithFilter(func(hdr *tar.Header) (bool, error) { + + filter := archive.WithFilter(func(hdr *tar.Header) (bool, error) { d := filepath.Dir(hdr.Name) result := d == binDir @@ -87,7 +88,15 @@ func (c *Client) Install(ctx context.Context, image Image, opts ...InstallOpts) } } return result, nil - })); err != nil { + }) + + opts := []archive.ApplyOpt{filter} + + if runtime.GOOS == "windows" { + opts = append(opts, archive.WithNoSameOwner()) + } + + if _, err := archive.Apply(ctx, path, r, opts...); err != nil { r.Close() ra.Close() return err diff --git a/vendor/github.com/containerd/containerd/labels/labels.go b/vendor/github.com/containerd/containerd/labels/labels.go index d76ff2cf9c999..6be006c3e1fce 100644 --- a/vendor/github.com/containerd/containerd/labels/labels.go +++ b/vendor/github.com/containerd/containerd/labels/labels.go @@ -19,3 +19,7 @@ package labels // LabelUncompressed is added to compressed layer contents. // The value is digest of the uncompressed content. const LabelUncompressed = "containerd.io/uncompressed" + +// LabelSharedNamespace is added to a namespace to allow that namespaces +// contents to be shared. 
+const LabelSharedNamespace = "containerd.io/namespace.shareable" diff --git a/vendor/github.com/containerd/containerd/labels/validate.go b/vendor/github.com/containerd/containerd/labels/validate.go index 1fd527adb35fa..f83b5dde294d6 100644 --- a/vendor/github.com/containerd/containerd/labels/validate.go +++ b/vendor/github.com/containerd/containerd/labels/validate.go @@ -24,15 +24,18 @@ import ( const ( maxSize = 4096 + // maximum length of key portion of error message if len of key + len of value > maxSize + keyMaxLen = 64 ) // Validate a label's key and value are under 4096 bytes func Validate(k, v string) error { - if (len(k) + len(v)) > maxSize { - if len(k) > 10 { - k = k[:10] + total := len(k) + len(v) + if total > maxSize { + if len(k) > keyMaxLen { + k = k[:keyMaxLen] } - return fmt.Errorf("label key and value greater than maximum size (%d bytes), key: %s: %w", maxSize, k, errdefs.ErrInvalidArgument) + return fmt.Errorf("label key and value length (%d bytes) greater than maximum size (%d bytes), key: %s: %w", total, maxSize, k, errdefs.ErrInvalidArgument) } return nil } diff --git a/vendor/github.com/containerd/containerd/leases/id.go b/vendor/github.com/containerd/containerd/leases/id.go index 8781a1d72ac6f..8f5dc93f348e6 100644 --- a/vendor/github.com/containerd/containerd/leases/id.go +++ b/vendor/github.com/containerd/containerd/leases/id.go @@ -17,9 +17,9 @@ package leases import ( + "crypto/rand" "encoding/base64" "fmt" - "math/rand" "time" ) diff --git a/vendor/github.com/containerd/containerd/leases/proxy/manager.go b/vendor/github.com/containerd/containerd/leases/proxy/manager.go index 96cd5e653ba47..ae42d8eb102c0 100644 --- a/vendor/github.com/containerd/containerd/leases/proxy/manager.go +++ b/vendor/github.com/containerd/containerd/leases/proxy/manager.go @@ -22,6 +22,7 @@ import ( leasesapi "github.com/containerd/containerd/api/services/leases/v1" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/protobuf" ) type proxyManager struct { @@ -53,7 +54,7 @@ func (pm *proxyManager) Create(ctx context.Context, opts ...leases.Opt) (leases. 
return leases.Lease{ ID: resp.Lease.ID, - CreatedAt: resp.Lease.CreatedAt, + CreatedAt: protobuf.FromTimestamp(resp.Lease.CreatedAt), Labels: resp.Lease.Labels, }, nil } @@ -84,7 +85,7 @@ func (pm *proxyManager) List(ctx context.Context, filters ...string) ([]leases.L for i := range resp.Leases { l[i] = leases.Lease{ ID: resp.Leases[i].ID, - CreatedAt: resp.Leases[i].CreatedAt, + CreatedAt: protobuf.FromTimestamp(resp.Leases[i].CreatedAt), Labels: resp.Leases[i].Labels, } } @@ -95,7 +96,7 @@ func (pm *proxyManager) List(ctx context.Context, filters ...string) ([]leases.L func (pm *proxyManager) AddResource(ctx context.Context, lease leases.Lease, r leases.Resource) error { _, err := pm.client.AddResource(ctx, &leasesapi.AddResourceRequest{ ID: lease.ID, - Resource: leasesapi.Resource{ + Resource: &leasesapi.Resource{ ID: r.ID, Type: r.Type, }, @@ -106,7 +107,7 @@ func (pm *proxyManager) AddResource(ctx context.Context, lease leases.Lease, r l func (pm *proxyManager) DeleteResource(ctx context.Context, lease leases.Lease, r leases.Resource) error { _, err := pm.client.DeleteResource(ctx, &leasesapi.DeleteResourceRequest{ ID: lease.ID, - Resource: leasesapi.Resource{ + Resource: &leasesapi.Resource{ ID: r.ID, Type: r.Type, }, diff --git a/vendor/github.com/containerd/containerd/log/context.go b/vendor/github.com/containerd/containerd/log/context.go index 0db9562b82bba..92cfcd91aea62 100644 --- a/vendor/github.com/containerd/containerd/log/context.go +++ b/vendor/github.com/containerd/containerd/log/context.go @@ -35,6 +35,9 @@ var ( type ( loggerKey struct{} + + // Fields type to pass to `WithFields`, alias from `logrus`. + Fields = logrus.Fields ) const ( diff --git a/vendor/github.com/containerd/containerd/metadata/adaptors.go b/vendor/github.com/containerd/containerd/metadata/adaptors.go index c5d576f844a10..dbff7bacd9a67 100644 --- a/vendor/github.com/containerd/containerd/metadata/adaptors.go +++ b/vendor/github.com/containerd/containerd/metadata/adaptors.go @@ -24,6 +24,7 @@ import ( "github.com/containerd/containerd/filters" "github.com/containerd/containerd/images" "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/sandbox" "github.com/containerd/containerd/snapshots" ) @@ -149,6 +150,23 @@ func adaptSnapshot(info snapshots.Info) filters.Adaptor { }) } +func adaptSandbox(instance *sandbox.Sandbox) filters.Adaptor { + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "id": + return instance.ID, true + case "labels": + return checkMap(fieldpath[1:], instance.Labels) + default: + return "", false + } + }) +} + func checkMap(fieldpath []string, m map[string]string) (string, bool) { if len(m) == 0 { return "", false diff --git a/vendor/github.com/containerd/containerd/metadata/boltutil/helpers.go b/vendor/github.com/containerd/containerd/metadata/boltutil/helpers.go index 4722a522692e8..8f8df33615148 100644 --- a/vendor/github.com/containerd/containerd/metadata/boltutil/helpers.go +++ b/vendor/github.com/containerd/containerd/metadata/boltutil/helpers.go @@ -20,8 +20,10 @@ import ( "fmt" "time" - "github.com/gogo/protobuf/proto" - "github.com/gogo/protobuf/types" + "github.com/containerd/containerd/protobuf" + "github.com/containerd/containerd/protobuf/proto" + "github.com/containerd/containerd/protobuf/types" + "github.com/containerd/typeurl/v2" bolt "go.etcd.io/bbolt" ) @@ -151,7 +153,7 @@ func WriteTimestamps(bkt *bolt.Bucket, created, updated time.Time) 
error { // WriteExtensions will write a KV map to the given bucket, // where `K` is a string key and `V` is a protobuf's Any type that represents a generic extension. -func WriteExtensions(bkt *bolt.Bucket, extensions map[string]types.Any) error { +func WriteExtensions(bkt *bolt.Bucket, extensions map[string]typeurl.Any) error { if len(extensions) == 0 { return nil } @@ -162,7 +164,8 @@ func WriteExtensions(bkt *bolt.Bucket, extensions map[string]types.Any) error { } for name, ext := range extensions { - p, err := proto.Marshal(&ext) + ext := protobuf.FromAny(ext) + p, err := proto.Marshal(ext) if err != nil { return err } @@ -176,9 +179,9 @@ func WriteExtensions(bkt *bolt.Bucket, extensions map[string]types.Any) error { } // ReadExtensions will read back a map of extensions from the given bucket, previously written by WriteExtensions -func ReadExtensions(bkt *bolt.Bucket) (map[string]types.Any, error) { +func ReadExtensions(bkt *bolt.Bucket) (map[string]typeurl.Any, error) { var ( - extensions = make(map[string]types.Any) + extensions = make(map[string]typeurl.Any) ebkt = bkt.Bucket(bucketKeyExtensions) ) @@ -192,7 +195,7 @@ func ReadExtensions(bkt *bolt.Bucket) (map[string]types.Any, error) { return err } - extensions[string(k)] = t + extensions[string(k)] = &t return nil }); err != nil { return nil, err @@ -202,18 +205,19 @@ func ReadExtensions(bkt *bolt.Bucket) (map[string]types.Any, error) { } // WriteAny write a protobuf's Any type to the bucket -func WriteAny(bkt *bolt.Bucket, name []byte, any *types.Any) error { - if any == nil { +func WriteAny(bkt *bolt.Bucket, name []byte, any typeurl.Any) error { + pbany := protobuf.FromAny(any) + if pbany == nil { return nil } - data, err := proto.Marshal(any) + data, err := proto.Marshal(pbany) if err != nil { - return err + return fmt.Errorf("failed to marshal: %w", err) } if err := bkt.Put(name, data); err != nil { - return err + return fmt.Errorf("put failed: %w", err) } return nil diff --git a/vendor/github.com/containerd/containerd/metadata/buckets.go b/vendor/github.com/containerd/containerd/metadata/buckets.go index 516de1fc7a77f..8dcf10f475aa3 100644 --- a/vendor/github.com/containerd/containerd/metadata/buckets.go +++ b/vendor/github.com/containerd/containerd/metadata/buckets.go @@ -44,75 +44,82 @@ // // Below is the current database schema. This should be updated each time // the structure is changed in addition to adding a migration and incrementing -// the database version. Note that `╘══*...*` refers to maps with arbitrary -// keys. +// the database version. 
+// Notes: // -// ├──version : - Latest version, see migrations -// └──v1 - Schema version bucket -// ╘══*namespace* -// ├──labels -// │  ╘══*key* : - Label value -// ├──image -// │  ╘══*image name* -// │   ├──createdat : - Created at -// │   ├──updatedat : - Updated at -// │   ├──target -// │   │  ├──digest : - Descriptor digest -// │   │  ├──mediatype : - Descriptor media type -// │   │  └──size : - Descriptor size -// │   └──labels -// │   ╘══*key* : - Label value -// ├──containers -// │  ╘══*container id* -// │   ├──createdat : - Created at -// │   ├──updatedat : - Updated at -// │   ├──spec : - Proto marshaled spec -// │   ├──image : - Image name -// │   ├──snapshotter : - Snapshotter name -// │   ├──snapshotKey : - Snapshot key -// │   ├──runtime -// │   │  ├──name : - Runtime name -// │   │  ├──extensions -// │   │  │  ╘══*name* : - Proto marshaled extension -// │   │  └──options : - Proto marshaled options -// │   └──labels -// │   ╘══*key* : - Label value -// ├──snapshots -// │  ╘══*snapshotter* -// │   ╘══*snapshot key* -// │    ├──name : - Snapshot name in backend -// │   ├──createdat : - Created at -// │   ├──updatedat : - Updated at -// │    ├──parent : - Parent snapshot name -// │   ├──children -// │   │  ╘══*snapshot key* : - Child snapshot reference -// │   └──labels -// │   ╘══*key* : - Label value -// ├──content -// │  ├──blob -// │  │ ╘══*blob digest* -// │  │ ├──createdat : - Created at -// │  │ ├──updatedat : - Updated at -// │  │   ├──size : - Blob size -// │  │ └──labels -// │  │ ╘══*key* : - Label value -// │  └──ingests -// │   ╘══*ingest reference* -// │    ├──ref : - Ingest reference in backend -// │   ├──expireat : - Time to expire ingest -// │   └──expected : - Expected commit digest -// └──leases -// ╘══*lease id* -//   ├──createdat : - Created at -// ├──labels -// │ ╘══*key* : - Label value -//   ├──snapshots -// │  ╘══*snapshotter* -// │   ╘══*snapshot key* : - Snapshot reference -//   ├──content -// │  ╘══*blob digest* : - Content blob reference -// └──ingests -//   ╘══*ingest reference* : - Content ingest reference +// - `╘══*...*` refers to maps with arbitrary keys +// +// - `version` is a key to a numeric value identifying the minor revisions +// of schema version +// +// - a namespace in a schema bucket cannot be named "version" +// +// └──v1 - Schema version bucket +// ├──version : - Latest version, see migrations +// ╘══*namespace* +// ├──labels +// │  ╘══*key* : - Label value +// ├──image +// │  ╘══*image name* +// │   ├──createdat : - Created at +// │   ├──updatedat : - Updated at +// │   ├──target +// │   │  ├──digest : - Descriptor digest +// │   │  ├──mediatype : - Descriptor media type +// │   │  └──size : - Descriptor size +// │   └──labels +// │   ╘══*key* : - Label value +// ├──containers +// │  ╘══*container id* +// │   ├──createdat : - Created at +// │   ├──updatedat : - Updated at +// │   ├──spec : - Proto marshaled spec +// │   ├──image : - Image name +// │   ├──snapshotter : - Snapshotter name +// │   ├──snapshotKey : - Snapshot key +// │   ├──runtime +// │   │  ├──name : - Runtime name +// │   │  ├──extensions +// │   │  │  ╘══*name* : - Proto marshaled extension +// │   │  └──options : - Proto marshaled options +// │   └──labels +// │   ╘══*key* : - Label value +// ├──snapshots +// │  ╘══*snapshotter* +// │   ╘══*snapshot key* +// │    ├──name : - Snapshot name in backend +// │   ├──createdat : - Created at +// │   ├──updatedat : - Updated at +// │    ├──parent : - Parent snapshot name +// │   ├──children +// │   │  ╘══*snapshot key* : 
- Child snapshot reference +// │   └──labels +// │   ╘══*key* : - Label value +// ├──content +// │  ├──blob +// │  │ ╘══*blob digest* +// │  │ ├──createdat : - Created at +// │  │ ├──updatedat : - Updated at +// │  │   ├──size : - Blob size +// │  │ └──labels +// │  │ ╘══*key* : - Label value +// │  └──ingests +// │   ╘══*ingest reference* +// │    ├──ref : - Ingest reference in backend +// │   ├──expireat : - Time to expire ingest +// │   └──expected : - Expected commit digest +// └──leases +// ╘══*lease id* +// ├──createdat : - Created at +// ├──labels +// │ ╘══*key* : - Label value +// ├──snapshots +// │  ╘══*snapshotter* +// │   ╘══*snapshot key* : - Snapshot reference +// ├──content +// │  ╘══*blob digest* : - Content blob reference +// └──ingests +// ╘══*ingest reference* : - Content ingest reference package metadata import ( @@ -131,6 +138,7 @@ var ( bucketKeyObjectBlob = []byte("blob") // stores content links bucketKeyObjectIngests = []byte("ingests") // stores ingest objects bucketKeyObjectLeases = []byte("leases") // stores leases + bucketKeyObjectSandboxes = []byte("sandboxes") // stores sandboxes bucketKeyDigest = []byte("digest") bucketKeyMediaType = []byte("mediatype") @@ -150,6 +158,7 @@ var ( bucketKeyExpected = []byte("expected") bucketKeyRef = []byte("ref") bucketKeyExpireAt = []byte("expireat") + bucketKeySandboxID = []byte("sandboxid") deprecatedBucketKeyObjectIngest = []byte("ingest") // stores ingest links, deprecated in v1.2 ) @@ -271,3 +280,19 @@ func createIngestBucket(tx *bolt.Tx, namespace, ref string) (*bolt.Bucket, error func getIngestBucket(tx *bolt.Tx, namespace, ref string) *bolt.Bucket { return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContent, bucketKeyObjectIngests, []byte(ref)) } + +func createSandboxBucket(tx *bolt.Tx, namespace string) (*bolt.Bucket, error) { + return createBucketIfNotExists( + tx, + []byte(namespace), + bucketKeyObjectSandboxes, + ) +} + +func getSandboxBucket(tx *bolt.Tx, namespace string) *bolt.Bucket { + return getBucket( + tx, + []byte(namespace), + bucketKeyObjectSandboxes, + ) +} diff --git a/vendor/github.com/containerd/containerd/metadata/containers.go b/vendor/github.com/containerd/containerd/metadata/containers.go index 97002e5886e1c..d97d9c6cd1aca 100644 --- a/vendor/github.com/containerd/containerd/metadata/containers.go +++ b/vendor/github.com/containerd/containerd/metadata/containers.go @@ -30,8 +30,9 @@ import ( "github.com/containerd/containerd/labels" "github.com/containerd/containerd/metadata/boltutil" "github.com/containerd/containerd/namespaces" - "github.com/gogo/protobuf/proto" - "github.com/gogo/protobuf/types" + "github.com/containerd/containerd/protobuf/proto" + "github.com/containerd/containerd/protobuf/types" + "github.com/containerd/typeurl/v2" bolt "go.etcd.io/bbolt" ) @@ -211,7 +212,7 @@ func (s *containerStore) Update(ctx context.Context, container containers.Contai if strings.HasPrefix(path, "extensions.") { if updated.Extensions == nil { - updated.Extensions = map[string]types.Any{} + updated.Extensions = map[string]typeurl.Any{} } key := strings.TrimPrefix(path, "extensions.") updated.Extensions[key] = container.Extensions[key] @@ -358,6 +359,8 @@ func readContainer(container *containers.Container, bkt *bolt.Bucket) error { } container.Extensions = extensions + case string(bucketKeySandboxID): + container.SandboxID = string(v) } return nil @@ -406,5 +409,9 @@ func writeContainer(bkt *bolt.Bucket, container *containers.Container) error { return err } + if err := 
bkt.Put(bucketKeySandboxID, []byte(container.SandboxID)); err != nil { + return err + } + return boltutil.WriteLabels(bkt, container.Labels) } diff --git a/vendor/github.com/containerd/containerd/metadata/content.go b/vendor/github.com/containerd/containerd/metadata/content.go index 66d0ee263ef67..2df665fcfce91 100644 --- a/vendor/github.com/containerd/containerd/metadata/content.go +++ b/vendor/github.com/containerd/containerd/metadata/content.go @@ -398,7 +398,7 @@ func (cs *contentStore) Writer(ctx context.Context, opts ...content.WriterOpt) ( return nil } - if cs.shared { + if cs.shared || isSharedContent(tx, wOpts.Desc.Digest) { if st, err := cs.Store.Info(ctx, wOpts.Desc.Digest); err == nil { // Ensure the expected size is the same, it is likely // an error if the size is mismatched but the caller @@ -706,6 +706,33 @@ func (cs *contentStore) checkAccess(ctx context.Context, dgst digest.Digest) err }) } +func isSharedContent(tx *bolt.Tx, dgst digest.Digest) bool { + v1bkt := tx.Bucket(bucketKeyVersion) + if v1bkt == nil { + return false + } + // iterate through each namespace + v1c := v1bkt.Cursor() + for nk, _ := v1c.First(); nk != nil; nk, _ = v1c.Next() { + ns := string(nk) + lbkt := getNamespaceLabelsBucket(tx, ns) + if lbkt == nil { + continue + } + // iterate through each label + lbc := lbkt.Cursor() + for k, v := lbc.First(); k != nil; k, v = lbc.Next() { + if string(k) == labels.LabelSharedNamespace { + if string(v) == "true" && getBlobBucket(tx, ns, dgst) != nil { + return true + } + break + } + } + } + return false +} + func validateInfo(info *content.Info) error { for k, v := range info.Labels { if err := labels.Validate(k, v); err != nil { diff --git a/vendor/github.com/containerd/containerd/metadata/db.go b/vendor/github.com/containerd/containerd/metadata/db.go index 2d9cbf31a6754..8241930a96854 100644 --- a/vendor/github.com/containerd/containerd/metadata/db.go +++ b/vendor/github.com/containerd/containerd/metadata/db.go @@ -26,9 +26,13 @@ import ( "sync/atomic" "time" + eventstypes "github.com/containerd/containerd/api/events" "github.com/containerd/containerd/content" + "github.com/containerd/containerd/events" "github.com/containerd/containerd/gc" "github.com/containerd/containerd/log" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/pkg/cleanup" "github.com/containerd/containerd/snapshots" bolt "go.etcd.io/bbolt" ) @@ -56,9 +60,18 @@ func WithPolicyIsolated(o *dbOptions) { o.shared = false } +// WithEventsPublisher adds an events publisher to the +// metadata db to directly publish events +func WithEventsPublisher(p events.Publisher) DBOpt { + return func(o *dbOptions) { + o.publisher = p + } +} + // dbOptions configure db options. type dbOptions struct { - shared bool + shared bool + publisher events.Publisher } // DB represents a metadata database backed by a bolt @@ -94,6 +107,9 @@ type DB struct { // set indicating whether any dirty flags are set mutationCallbacks []func(bool) + // collectible resources + collectors map[gc.ResourceType]Collector + dbopts dbOptions } @@ -215,8 +231,8 @@ func (m *DB) ContentStore() content.Store { return m.cs } -// Snapshotter returns a namespaced content store for -// the requested snapshotter name proxied to a snapshotter. +// Snapshotter returns a snapshotter for the requested snapshotter name +// proxied to a snapshotter. 
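The new WithEventsPublisher option above wires the metadata DB to an events.Publisher, so the snapshot and garbage-collection events added later in this patch can be emitted directly from the metadata layer. A minimal construction sketch, assuming the content/local and events/exchange packages from this vendored tree; the openMetadataDB helper and on-disk layout are illustrative, not part of the patch:

```go
package example

import (
	"context"
	"path/filepath"

	"github.com/containerd/containerd/content/local"
	"github.com/containerd/containerd/events/exchange"
	"github.com/containerd/containerd/metadata"
	bolt "go.etcd.io/bbolt"
)

// openMetadataDB is a hypothetical helper showing how a daemon could opt in
// to direct event publishing when building the metadata store.
func openMetadataDB(ctx context.Context, root string) (*metadata.DB, error) {
	bdb, err := bolt.Open(filepath.Join(root, "meta.db"), 0644, nil)
	if err != nil {
		return nil, err
	}

	cs, err := local.NewStore(filepath.Join(root, "content"))
	if err != nil {
		return nil, err
	}

	// exchange.Exchange satisfies events.Publisher; snapshot events and GC
	// removals published by the metadata layer end up on this exchange.
	db := metadata.NewDB(bdb, cs, nil, metadata.WithEventsPublisher(exchange.NewExchange()))
	if err := db.Init(ctx); err != nil {
		return nil, err
	}
	return db, nil
}
```

Without the option the publisher stays nil and every new Publish call site in this patch is skipped, so existing callers are unaffected.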
func (m *DB) Snapshotter(name string) snapshots.Snapshotter { sn, ok := m.ss[name] if !ok { @@ -265,6 +281,63 @@ func (m *DB) RegisterMutationCallback(fn func(bool)) { m.wlock.Unlock() } +// RegisterCollectibleResource registers a resource type which can be +// referenced by metadata resources and garbage collected. +// Collectible Resources are useful ephemeral resources which need to +// be tracked by go away after reboot or process restart. +// +// A few limitations to consider: +// - Collectible Resources cannot reference other resources. +// - A failure to complete collection will not fail the garbage collection, +// however, the resources can be collected in a later run. +// - Collectible Resources must track whether the resource is active and/or +// lease membership. +func (m *DB) RegisterCollectibleResource(t gc.ResourceType, c Collector) { + if t < resourceEnd { + panic("cannot re-register metadata resource") + } else if t >= gc.ResourceMax { + panic("resource type greater than max") + } + + m.wlock.Lock() + defer m.wlock.Unlock() + + if m.collectors == nil { + m.collectors = map[gc.ResourceType]Collector{} + } + + if _, ok := m.collectors[t]; ok { + panic("cannot register collectible type twice") + } + m.collectors[t] = c +} + +// namespacedEvent is used to handle any event for a namespace +type namespacedEvent struct { + namespace string + event interface{} +} + +func (m *DB) publishEvents(events []namespacedEvent) { + ctx := context.Background() + if publisher := m.dbopts.publisher; publisher != nil { + for _, ne := range events { + ctx := namespaces.WithNamespace(ctx, ne.namespace) + var topic string + switch ne.event.(type) { + case *eventstypes.SnapshotRemove: + topic = "/snapshot/remove" + default: + log.G(ctx).WithField("event", ne.event).Debug("unhandled event type from garbage collection removal") + continue + } + if err := publisher.Publish(ctx, topic, ne.event); err != nil { + log.G(ctx).WithError(err).WithField("topic", topic).Debug("publish event failed") + } + } + } +} + // GCStats holds the duration for the different phases of the garbage collector type GCStats struct { MetaD time.Duration @@ -281,13 +354,16 @@ func (s GCStats) Elapsed() time.Duration { func (m *DB) GarbageCollect(ctx context.Context) (gc.Stats, error) { m.wlock.Lock() t1 := time.Now() + c := startGCContext(ctx, m.collectors) - marked, err := m.getMarked(ctx) + marked, err := m.getMarked(ctx, c) // Pass in gc context if err != nil { m.wlock.Unlock() + c.cancel(ctx) return nil, err } + events := []namespacedEvent{} if err := m.db.Update(func(tx *bolt.Tx) error { ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -301,25 +377,43 @@ func (m *DB) GarbageCollect(ctx context.Context) (gc.Stats, error) { if idx := strings.IndexRune(n.Key, '/'); idx > 0 { m.dirtySS[n.Key[:idx]] = struct{}{} } + // queue event to publish after successful commit } else if n.Type == ResourceContent || n.Type == ResourceIngest { m.dirtyCS = true } - return remove(ctx, tx, n) + + event, err := c.remove(ctx, tx, n) + if event != nil && err == nil { + events = append(events, + namespacedEvent{ + namespace: n.Namespace, + event: event, + }) + } + return err } - if err := scanAll(ctx, tx, rm); err != nil { + if err := c.scanAll(ctx, tx, rm); err != nil { // From gc context return fmt.Errorf("failed to scan and remove: %w", err) } return nil }); err != nil { m.wlock.Unlock() + c.cancel(ctx) return nil, err } var stats GCStats var wg sync.WaitGroup + // Flush events asynchronously after commit + wg.Add(1) + go func() { + 
m.publishEvents(events) + wg.Done() + }() + // reset dirty, no need for atomic inside of wlock.Lock m.dirty = 0 @@ -331,7 +425,7 @@ func (m *DB) GarbageCollect(ctx context.Context) (gc.Stats, error) { log.G(ctx).WithField("snapshotter", snapshotterName).Debug("schedule snapshotter cleanup") go func(snapshotterName string) { st1 := time.Now() - m.cleanupSnapshotter(snapshotterName) + m.cleanupSnapshotter(ctx, snapshotterName) sl.Lock() stats.SnapshotD[snapshotterName] = time.Since(st1) @@ -348,7 +442,7 @@ func (m *DB) GarbageCollect(ctx context.Context) (gc.Stats, error) { log.G(ctx).Debug("schedule content cleanup") go func() { ct1 := time.Now() - m.cleanupContent() + m.cleanupContent(ctx) stats.ContentD = time.Since(ct1) wg.Done() }() @@ -358,13 +452,15 @@ func (m *DB) GarbageCollect(ctx context.Context) (gc.Stats, error) { stats.MetaD = time.Since(t1) m.wlock.Unlock() + c.finish(ctx) + wg.Wait() return stats, err } // getMarked returns all resources that are used. -func (m *DB) getMarked(ctx context.Context) (map[gc.Node]struct{}, error) { +func (m *DB) getMarked(ctx context.Context, c *gcContext) (map[gc.Node]struct{}, error) { var marked map[gc.Node]struct{} if err := m.db.View(func(tx *bolt.Tx) error { ctx, cancel := context.WithCancel(ctx) @@ -383,7 +479,7 @@ func (m *DB) getMarked(ctx context.Context) (map[gc.Node]struct{}, error) { } }() // Call roots - if err := scanRoots(ctx, tx, roots); err != nil { + if err := c.scanRoots(ctx, tx, roots); err != nil { // From gc context cancel() return err } @@ -392,7 +488,7 @@ func (m *DB) getMarked(ctx context.Context) (map[gc.Node]struct{}, error) { refs := func(n gc.Node) ([]gc.Node, error) { var sn []gc.Node - if err := references(ctx, tx, n, func(nn gc.Node) { + if err := c.references(ctx, tx, n, func(nn gc.Node) { // From gc context sn = append(sn, nn) }); err != nil { return nil, err @@ -412,8 +508,8 @@ func (m *DB) getMarked(ctx context.Context) (map[gc.Node]struct{}, error) { return marked, nil } -func (m *DB) cleanupSnapshotter(name string) (time.Duration, error) { - ctx := context.Background() +func (m *DB) cleanupSnapshotter(ctx context.Context, name string) (time.Duration, error) { + ctx = cleanup.Background(ctx) sn, ok := m.ss[name] if !ok { return 0, nil @@ -429,8 +525,8 @@ func (m *DB) cleanupSnapshotter(name string) (time.Duration, error) { return d, err } -func (m *DB) cleanupContent() (time.Duration, error) { - ctx := context.Background() +func (m *DB) cleanupContent(ctx context.Context) (time.Duration, error) { + ctx = cleanup.Background(ctx) if m.cs == nil { return 0, nil } diff --git a/vendor/github.com/containerd/containerd/metadata/gc.go b/vendor/github.com/containerd/containerd/metadata/gc.go index 60bf410a6dd87..5518a44872a4f 100644 --- a/vendor/github.com/containerd/containerd/metadata/gc.go +++ b/vendor/github.com/containerd/containerd/metadata/gc.go @@ -20,9 +20,11 @@ import ( "bytes" "context" "fmt" + "sort" "strings" "time" + eventstypes "github.com/containerd/containerd/api/events" "github.com/containerd/containerd/gc" "github.com/containerd/containerd/log" bolt "go.etcd.io/bbolt" @@ -43,6 +45,10 @@ const ( ResourceLease // ResourceIngest specifies a content ingest ResourceIngest + // resourceEnd is the end of specified resource types + resourceEnd + // ResourceStream specifies a stream + ResourceStream ) const ( @@ -52,15 +58,162 @@ const ( var ( labelGCRoot = []byte("containerd.io/gc.root") + labelGCRef = []byte("containerd.io/gc.ref.") labelGCSnapRef = []byte("containerd.io/gc.ref.snapshot.") 
labelGCContentRef = []byte("containerd.io/gc.ref.content") labelGCExpire = []byte("containerd.io/gc.expire") labelGCFlat = []byte("containerd.io/gc.flat") ) +// CollectionContext manages a resource collection during a single run of +// the garbage collector. The context is responsible for managing access to +// resources as well as tracking removal. +// Implementations should defer any longer running operations to the Finish +// function and optimize other functions for running fast during garbage +// collection write locks. +type CollectionContext interface { + // Sends all known resources + All(func(gc.Node)) + + // Active sends all active resources + // Leased resources may be excluded since lease ownership should take + // precedence over active status. + Active(namespace string, fn func(gc.Node)) + + // Leased sends all resources associated with the given lease + Leased(namespace, lease string, fn func(gc.Node)) + + // Remove marks the given resource as removed + Remove(gc.Node) + + // Cancel is called to cleanup a context after a failed collection + Cancel() error + + // Finish is called to cleanup a context after a successful collection + Finish() error +} + +// Collector is an interface to manage resource collection for any collectible +// resource registered for garbage collection. +type Collector interface { + StartCollection(context.Context) (CollectionContext, error) + + ReferenceLabel() string +} + +type gcContext struct { + labelHandlers []referenceLabelHandler + contexts map[gc.ResourceType]CollectionContext +} + +type referenceLabelHandler struct { + key []byte + fn func(string, []byte, []byte, func(gc.Node)) +} + +func startGCContext(ctx context.Context, collectors map[gc.ResourceType]Collector) *gcContext { + var contexts map[gc.ResourceType]CollectionContext + labelHandlers := []referenceLabelHandler{ + { + key: labelGCContentRef, + fn: func(ns string, k, v []byte, fn func(gc.Node)) { + if ks := string(k); ks != string(labelGCContentRef) { + // Allow reference naming separated by . or /, ignore names + if ks[len(labelGCContentRef)] != '.' && ks[len(labelGCContentRef)] != '/' { + return + } + } + + fn(gcnode(ResourceContent, ns, string(v))) + }, + }, + { + key: labelGCSnapRef, + fn: func(ns string, k, v []byte, fn func(gc.Node)) { + snapshotter := k[len(labelGCSnapRef):] + if i := bytes.IndexByte(snapshotter, '/'); i >= 0 { + snapshotter = snapshotter[:i] + } + fn(gcnode(ResourceSnapshot, ns, fmt.Sprintf("%s/%s", snapshotter, v))) + }, + }, + } + if len(collectors) > 0 { + contexts = map[gc.ResourceType]CollectionContext{} + for rt, collector := range collectors { + rt := rt + c, err := collector.StartCollection(ctx) + if err != nil { + // Only skipping this resource this round + continue + } + + if reflabel := collector.ReferenceLabel(); reflabel != "" { + key := append(labelGCRef, reflabel...) + labelHandlers = append(labelHandlers, referenceLabelHandler{ + key: key, + fn: func(ns string, k, v []byte, fn func(gc.Node)) { + if ks := string(k); ks != string(key) { + // Allow reference naming separated by . or /, ignore names + if ks[len(key)] != '.' 
&& ks[len(key)] != '/' { + return + } + } + + fn(gcnode(rt, ns, string(v))) + }, + }) + } + contexts[rt] = c + } + // Sort labelHandlers to ensure key seeking is always forwardS + sort.Slice(labelHandlers, func(i, j int) bool { + return bytes.Compare(labelHandlers[i].key, labelHandlers[j].key) < 0 + }) + } + return &gcContext{ + labelHandlers: labelHandlers, + contexts: contexts, + } +} + +func (c *gcContext) all(fn func(gc.Node)) { + for _, gctx := range c.contexts { + gctx.All(fn) + } +} + +func (c *gcContext) active(namespace string, fn func(gc.Node)) { + for _, gctx := range c.contexts { + gctx.Active(namespace, fn) + } +} + +func (c *gcContext) leased(namespace, lease string, fn func(gc.Node)) { + for _, gctx := range c.contexts { + gctx.Leased(namespace, lease, fn) + } +} + +func (c *gcContext) cancel(ctx context.Context) { + for _, gctx := range c.contexts { + if err := gctx.Cancel(); err != nil { + log.G(ctx).WithError(err).Error("failed to cancel collection context") + } + } +} + +func (c *gcContext) finish(ctx context.Context) { + for _, gctx := range c.contexts { + if err := gctx.Finish(); err != nil { + log.G(ctx).WithError(err).Error("failed to finish collection context") + } + } +} + // scanRoots sends the given channel "root" resources that are certainly used. // The caller could look the references of the resources to find all resources that are used. -func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { +func (c *gcContext) scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { v1bkt := tx.Bucket(bucketKeyVersion) if v1bkt == nil { return nil @@ -170,6 +323,8 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { } } + c.leased(ns, string(k), fn) + return nil }); err != nil { return err @@ -188,7 +343,7 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { contentKey := string(target.Get(bucketKeyDigest)) fn(gcnode(ResourceContent, ns, contentKey)) } - return sendLabelRefs(ns, ibkt.Bucket(k), fn) + return c.sendLabelRefs(ns, ibkt.Bucket(k), fn) }); err != nil { return err } @@ -247,7 +402,7 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { fn(gcnode(ResourceSnapshot, ns, fmt.Sprintf("%s/%s", snapshotter, ss))) } - return sendLabelRefs(ns, cibkt, fn) + return c.sendLabelRefs(ns, cibkt, fn) }); err != nil { return err } @@ -274,12 +429,28 @@ func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error { return err } } + + bbkt := nbkt.Bucket(bucketKeyObjectSandboxes) + if bbkt != nil { + if err := bbkt.ForEach(func(k, v []byte) error { + if v != nil { + return nil + } + + sbbkt := bbkt.Bucket(k) + return c.sendLabelRefs(ns, sbbkt, fn) + }); err != nil { + return err + } + } + + c.active(ns, fn) } return cerr } // references finds the resources that are reachable from the given node. 
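RegisterCollectibleResource together with the Collector and CollectionContext interfaces lets an external store participate in metadata garbage collection. A rough sketch of a collector for an in-memory resource follows; the resource type number and backing map are assumptions, and a real implementation must pick a type value outside the range reserved by this package and below gc.ResourceMax, or registration will panic:

```go
package example

import (
	"context"
	"sync"

	"github.com/containerd/containerd/gc"
	"github.com/containerd/containerd/metadata"
)

// ResourceExample is a hypothetical resource type number.
const ResourceExample = gc.ResourceType(0x09)

type exampleCollector struct {
	mu        sync.Mutex
	resources map[string][]string // namespace -> resource keys
}

// StartCollection freezes the resource set for the duration of one GC run.
func (c *exampleCollector) StartCollection(ctx context.Context) (metadata.CollectionContext, error) {
	c.mu.Lock() // released by Cancel or Finish
	return &exampleContext{c: c, removed: map[string][]string{}}, nil
}

// ReferenceLabel makes "containerd.io/gc.ref.example..." labels point at this resource type.
func (c *exampleCollector) ReferenceLabel() string { return "example" }

type exampleContext struct {
	c       *exampleCollector
	removed map[string][]string
}

func (cc *exampleContext) All(fn func(gc.Node)) {
	for ns, keys := range cc.c.resources {
		for _, k := range keys {
			fn(gc.Node{Type: ResourceExample, Namespace: ns, Key: k})
		}
	}
}

// Active and Leased are no-ops here: nothing is pinned outside label references.
func (cc *exampleContext) Active(namespace string, fn func(gc.Node))        {}
func (cc *exampleContext) Leased(namespace, lease string, fn func(gc.Node)) {}

func (cc *exampleContext) Remove(n gc.Node) {
	cc.removed[n.Namespace] = append(cc.removed[n.Namespace], n.Key)
}

// Cancel drops the queued removals after a failed collection.
func (cc *exampleContext) Cancel() error { cc.c.mu.Unlock(); return nil }

// Finish applies the queued removals; longer work belongs here, after the
// metadata store has released its write lock.
func (cc *exampleContext) Finish() error {
	defer cc.c.mu.Unlock()
	for ns, removed := range cc.removed {
		drop := make(map[string]bool, len(removed))
		for _, k := range removed {
			drop[k] = true
		}
		kept := cc.c.resources[ns][:0] // in-place filter
		for _, k := range cc.c.resources[ns] {
			if !drop[k] {
				kept = append(kept, k)
			}
		}
		cc.c.resources[ns] = kept
	}
	return nil
}
```

Registration is then a single call, e.g. db.RegisterCollectibleResource(ResourceExample, &exampleCollector{resources: map[string][]string{}}); registering a metadata-owned type or registering the same type twice panics, as the guards in RegisterCollectibleResource show.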
-func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node)) error { +func (c *gcContext) references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node)) error { switch node.Type { case ResourceContent: bkt := getBucket(tx, bucketKeyVersion, []byte(node.Namespace), bucketKeyObjectContent, bucketKeyObjectBlob, []byte(node.Key)) @@ -288,15 +459,12 @@ func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node) return nil } - return sendLabelRefs(node.Namespace, bkt, fn) + return c.sendLabelRefs(node.Namespace, bkt, fn) case ResourceSnapshot, resourceSnapshotFlat: - parts := strings.SplitN(node.Key, "/", 2) - if len(parts) != 2 { + ss, name, ok := strings.Cut(node.Key, "/") + if !ok { return fmt.Errorf("invalid snapshot gc key %s", node.Key) } - ss := parts[0] - name := parts[1] - bkt := getBucket(tx, bucketKeyVersion, []byte(node.Namespace), bucketKeyObjectSnapshots, []byte(ss), []byte(name)) if bkt == nil { // Node may be created from dead edge @@ -312,7 +480,7 @@ func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node) return nil } - return sendLabelRefs(node.Namespace, bkt, fn) + return c.sendLabelRefs(node.Namespace, bkt, fn) case ResourceIngest: // Send expected value bkt := getBucket(tx, bucketKeyVersion, []byte(node.Namespace), bucketKeyObjectContent, bucketKeyObjectIngests, []byte(node.Key)) @@ -332,7 +500,7 @@ func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node) } // scanAll finds all resources regardless whether the resources are used or not. -func scanAll(ctx context.Context, tx *bolt.Tx, fn func(ctx context.Context, n gc.Node) error) error { +func (c *gcContext) scanAll(ctx context.Context, tx *bolt.Tx, fn func(ctx context.Context, n gc.Node) error) error { v1bkt := tx.Bucket(bucketKeyVersion) if v1bkt == nil { return nil @@ -409,19 +577,27 @@ func scanAll(ctx context.Context, tx *bolt.Tx, fn func(ctx context.Context, n gc } } + c.all(func(n gc.Node) { + _ = fn(ctx, n) + }) + return nil } // remove all buckets for the given node. 
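The label handlers built in startGCContext are what turn containerd.io/gc.ref.* labels into edges of the GC graph that scanRoots and references walk. A short sketch of labelling a manifest blob so its config, a layer, and an unpacked snapshot stay referenced; the digests, snapshotter name, and snapshot key are placeholders:

```go
package example

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/opencontainers/go-digest"
)

// labelManifestRefs attaches GC reference labels to a manifest blob. The
// suffix after "gc.ref.content" ("config", "l.0", ...) only names the edge;
// the value is the referenced digest. Snapshot references encode the
// snapshotter in the key and the snapshot key in the value.
func labelManifestRefs(ctx context.Context, cs content.Store, manifest, config, layer digest.Digest) error {
	info := content.Info{
		Digest: manifest,
		Labels: map[string]string{
			"containerd.io/gc.ref.content.config":     config.String(),
			"containerd.io/gc.ref.content.l.0":        layer.String(),
			"containerd.io/gc.ref.snapshot.overlayfs": "example-snapshot-key",
		},
	}
	// Fieldpath "labels" replaces the blob's label set with the references above.
	_, err := cs.Update(ctx, info, "labels")
	return err
}
```

With the new generic labelGCRef handler, a registered collector's ReferenceLabel gets the same treatment under containerd.io/gc.ref.&lt;label&gt;.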
-func remove(ctx context.Context, tx *bolt.Tx, node gc.Node) error { +func (c *gcContext) remove(ctx context.Context, tx *bolt.Tx, node gc.Node) (interface{}, error) { v1bkt := tx.Bucket(bucketKeyVersion) if v1bkt == nil { - return nil + return nil, nil } nsbkt := v1bkt.Bucket([]byte(node.Namespace)) if nsbkt == nil { - return nil + // Still remove object if refenced outside the db + if cc, ok := c.contexts[node.Type]; ok { + cc.Remove(node) + } + return nil, nil } switch node.Type { @@ -432,25 +608,28 @@ func remove(ctx context.Context, tx *bolt.Tx, node gc.Node) error { } if cbkt != nil { log.G(ctx).WithField("key", node.Key).Debug("remove content") - return cbkt.DeleteBucket([]byte(node.Key)) + return nil, cbkt.DeleteBucket([]byte(node.Key)) } case ResourceSnapshot: sbkt := nsbkt.Bucket(bucketKeyObjectSnapshots) if sbkt != nil { - parts := strings.SplitN(node.Key, "/", 2) - if len(parts) != 2 { - return fmt.Errorf("invalid snapshot gc key %s", node.Key) + ss, key, ok := strings.Cut(node.Key, "/") + if !ok { + return nil, fmt.Errorf("invalid snapshot gc key %s", node.Key) } - ssbkt := sbkt.Bucket([]byte(parts[0])) + ssbkt := sbkt.Bucket([]byte(ss)) if ssbkt != nil { - log.G(ctx).WithField("key", parts[1]).WithField("snapshotter", parts[0]).Debug("remove snapshot") - return ssbkt.DeleteBucket([]byte(parts[1])) + log.G(ctx).WithField("key", key).WithField("snapshotter", ss).Debug("remove snapshot") + return &eventstypes.SnapshotRemove{ + Key: key, + Snapshotter: ss, + }, ssbkt.DeleteBucket([]byte(key)) } } case ResourceLease: lbkt := nsbkt.Bucket(bucketKeyObjectLeases) if lbkt != nil { - return lbkt.DeleteBucket([]byte(node.Key)) + return nil, lbkt.DeleteBucket([]byte(node.Key)) } case ResourceIngest: ibkt := nsbkt.Bucket(bucketKeyObjectContent) @@ -459,39 +638,31 @@ func remove(ctx context.Context, tx *bolt.Tx, node gc.Node) error { } if ibkt != nil { log.G(ctx).WithField("ref", node.Key).Debug("remove ingest") - return ibkt.DeleteBucket([]byte(node.Key)) + return nil, ibkt.DeleteBucket([]byte(node.Key)) + } + default: + cc, ok := c.contexts[node.Type] + if ok { + cc.Remove(node) + } else { + log.G(ctx).WithField("ref", node.Key).WithField("type", node.Type).Info("no remove defined for resource") } } - return nil + return nil, nil } // sendLabelRefs sends all snapshot and content references referred to by the labels in the bkt -func sendLabelRefs(ns string, bkt *bolt.Bucket, fn func(gc.Node)) error { +func (c *gcContext) sendLabelRefs(ns string, bkt *bolt.Bucket, fn func(gc.Node)) error { lbkt := bkt.Bucket(bucketKeyObjectLabels) if lbkt != nil { lc := lbkt.Cursor() - - labelRef := string(labelGCContentRef) - for k, v := lc.Seek(labelGCContentRef); k != nil && strings.HasPrefix(string(k), labelRef); k, v = lc.Next() { - if ks := string(k); ks != labelRef { - // Allow reference naming separated by . or /, ignore names - if ks[len(labelRef)] != '.' 
&& ks[len(labelRef)] != '/' { - continue - } + for i := range c.labelHandlers { + labelRef := string(c.labelHandlers[i].key) + for k, v := lc.Seek(c.labelHandlers[i].key); k != nil && strings.HasPrefix(string(k), labelRef); k, v = lc.Next() { + c.labelHandlers[i].fn(ns, k, v, fn) } - - fn(gcnode(ResourceContent, ns, string(v))) } - - for k, v := lc.Seek(labelGCSnapRef); k != nil && strings.HasPrefix(string(k), string(labelGCSnapRef)); k, v = lc.Next() { - snapshotter := k[len(labelGCSnapRef):] - if i := bytes.IndexByte(snapshotter, '/'); i >= 0 { - snapshotter = snapshotter[:i] - } - fn(gcnode(ResourceSnapshot, ns, fmt.Sprintf("%s/%s", snapshotter, v))) - } - } return nil } diff --git a/vendor/github.com/containerd/containerd/metadata/images.go b/vendor/github.com/containerd/containerd/metadata/images.go index 8355b712ef9cc..4f074dc058e87 100644 --- a/vendor/github.com/containerd/containerd/metadata/images.go +++ b/vendor/github.com/containerd/containerd/metadata/images.go @@ -31,6 +31,7 @@ import ( "github.com/containerd/containerd/labels" "github.com/containerd/containerd/metadata/boltutil" "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/pkg/epoch" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" bolt "go.etcd.io/bbolt" @@ -144,7 +145,11 @@ func (s *imageStore) Create(ctx context.Context, image images.Image) (images.Ima return fmt.Errorf("image %q: %w", image.Name, errdefs.ErrAlreadyExists) } - image.CreatedAt = time.Now().UTC() + if tm := epoch.FromContext(ctx); tm != nil { + image.CreatedAt = tm.UTC() + } else { + image.CreatedAt = time.Now().UTC() + } image.UpdatedAt = image.CreatedAt return writeImage(ibkt, &image) }); err != nil { @@ -228,7 +233,11 @@ func (s *imageStore) Update(ctx context.Context, image images.Image, fieldpaths } updated.CreatedAt = createdat - updated.UpdatedAt = time.Now().UTC() + if tm := epoch.FromContext(ctx); tm != nil { + updated.UpdatedAt = tm.UTC() + } else { + updated.UpdatedAt = time.Now().UTC() + } return writeImage(ibkt, &updated) }); err != nil { return images.Image{}, err diff --git a/vendor/github.com/containerd/containerd/metadata/sandbox.go b/vendor/github.com/containerd/containerd/metadata/sandbox.go new file mode 100644 index 0000000000000..5766647d33e3e --- /dev/null +++ b/vendor/github.com/containerd/containerd/metadata/sandbox.go @@ -0,0 +1,373 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package metadata + +import ( + "context" + "errors" + "fmt" + "strings" + "time" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/filters" + "github.com/containerd/containerd/identifiers" + "github.com/containerd/containerd/metadata/boltutil" + "github.com/containerd/containerd/namespaces" + api "github.com/containerd/containerd/sandbox" + "github.com/containerd/typeurl/v2" + "go.etcd.io/bbolt" +) + +type sandboxStore struct { + db *DB +} + +var _ api.Store = (*sandboxStore)(nil) + +// NewSandboxStore creates a datababase client for sandboxes +func NewSandboxStore(db *DB) api.Store { + return &sandboxStore{db: db} +} + +// Create a sandbox record in the store +func (s *sandboxStore) Create(ctx context.Context, sandbox api.Sandbox) (api.Sandbox, error) { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return api.Sandbox{}, err + } + + sandbox.CreatedAt = time.Now().UTC() + sandbox.UpdatedAt = sandbox.CreatedAt + + if err := s.validate(&sandbox); err != nil { + return api.Sandbox{}, fmt.Errorf("failed to validate sandbox: %w", err) + } + + if err := s.db.Update(func(tx *bbolt.Tx) error { + parent, err := createSandboxBucket(tx, ns) + if err != nil { + return fmt.Errorf("create error: %w", err) + } + + if err := s.write(parent, &sandbox, false); err != nil { + return fmt.Errorf("write error: %w", err) + } + + return nil + }); err != nil { + return api.Sandbox{}, err + } + + return sandbox, nil +} + +// Update the sandbox with the provided sandbox object and fields +func (s *sandboxStore) Update(ctx context.Context, sandbox api.Sandbox, fieldpaths ...string) (api.Sandbox, error) { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return api.Sandbox{}, err + } + + ret := api.Sandbox{} + if err := update(ctx, s.db, func(tx *bbolt.Tx) error { + parent := getSandboxBucket(tx, ns) + if parent == nil { + return fmt.Errorf("no sandbox buckets: %w", errdefs.ErrNotFound) + } + + updated, err := s.read(parent, []byte(sandbox.ID)) + if err != nil { + return err + } + + if len(fieldpaths) == 0 { + fieldpaths = []string{"labels", "extensions", "spec", "runtime"} + + if updated.Runtime.Name != sandbox.Runtime.Name { + return fmt.Errorf("sandbox.Runtime.Name field is immutable: %w", errdefs.ErrInvalidArgument) + } + } + + for _, path := range fieldpaths { + if strings.HasPrefix(path, "labels.") { + if updated.Labels == nil { + updated.Labels = map[string]string{} + } + + key := strings.TrimPrefix(path, "labels.") + updated.Labels[key] = sandbox.Labels[key] + continue + } else if strings.HasPrefix(path, "extensions.") { + if updated.Extensions == nil { + updated.Extensions = map[string]typeurl.Any{} + } + + key := strings.TrimPrefix(path, "extensions.") + updated.Extensions[key] = sandbox.Extensions[key] + continue + } + + switch path { + case "labels": + updated.Labels = sandbox.Labels + case "extensions": + updated.Extensions = sandbox.Extensions + case "runtime": + updated.Runtime = sandbox.Runtime + case "spec": + updated.Spec = sandbox.Spec + default: + return fmt.Errorf("cannot update %q field on sandbox %q: %w", path, sandbox.ID, errdefs.ErrInvalidArgument) + } + } + + updated.UpdatedAt = time.Now().UTC() + + if err := s.validate(&updated); err != nil { + return err + } + + if err := s.write(parent, &updated, true); err != nil { + return err + } + + ret = updated + return nil + }); err != nil { + return api.Sandbox{}, err + } + + return ret, nil +} + +// Get sandbox metadata using the id +func (s *sandboxStore) Get(ctx 
context.Context, id string) (api.Sandbox, error) { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return api.Sandbox{}, err + } + + ret := api.Sandbox{} + if err := view(ctx, s.db, func(tx *bbolt.Tx) error { + bucket := getSandboxBucket(tx, ns) + if bucket == nil { + return fmt.Errorf("no sandbox buckets: %w", errdefs.ErrNotFound) + } + + out, err := s.read(bucket, []byte(id)) + if err != nil { + return err + } + + ret = out + return nil + }); err != nil { + return api.Sandbox{}, err + } + + return ret, nil +} + +// List returns sandboxes that match one or more of the provided filters +func (s *sandboxStore) List(ctx context.Context, fields ...string) ([]api.Sandbox, error) { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return nil, err + } + + filter, err := filters.ParseAll(fields...) + if err != nil { + return nil, fmt.Errorf("%s: %w", err.Error(), errdefs.ErrInvalidArgument) + } + + var ( + list []api.Sandbox + ) + + if err := view(ctx, s.db, func(tx *bbolt.Tx) error { + bucket := getSandboxBucket(tx, ns) + if bucket == nil { + // We haven't created any sandboxes yet, just return empty list + return nil + } + + if err := bucket.ForEach(func(k, v []byte) error { + info, err := s.read(bucket, k) + if err != nil { + return fmt.Errorf("failed to read bucket %q: %w", string(k), err) + } + + if filter.Match(adaptSandbox(&info)) { + list = append(list, info) + } + + return nil + }); err != nil { + return err + } + + return nil + }); err != nil { + return nil, err + } + + return list, nil +} + +// Delete a sandbox from metadata store using the id +func (s *sandboxStore) Delete(ctx context.Context, id string) error { + ns, err := namespaces.NamespaceRequired(ctx) + if err != nil { + return err + } + + if err := update(ctx, s.db, func(tx *bbolt.Tx) error { + buckets := getSandboxBucket(tx, ns) + if buckets == nil { + return fmt.Errorf("no sandbox buckets: %w", errdefs.ErrNotFound) + } + + if err := buckets.DeleteBucket([]byte(id)); err != nil { + return fmt.Errorf("failed to delete sandbox %q: %w", id, err) + } + + return nil + }); err != nil { + return err + } + + return nil +} + +func (s *sandboxStore) write(parent *bbolt.Bucket, instance *api.Sandbox, overwrite bool) error { + var ( + bucket *bbolt.Bucket + err error + id = []byte(instance.ID) + ) + + if overwrite { + bucket, err = parent.CreateBucketIfNotExists(id) + if err != nil { + return err + } + } else { + bucket = parent.Bucket(id) + if bucket != nil { + return fmt.Errorf("sandbox bucket %q already exists: %w", instance.ID, errdefs.ErrAlreadyExists) + } + + bucket, err = parent.CreateBucket(id) + if err != nil { + return err + } + } + + if err := boltutil.WriteTimestamps(bucket, instance.CreatedAt, instance.UpdatedAt); err != nil { + return err + } + + if err := boltutil.WriteLabels(bucket, instance.Labels); err != nil { + return err + } + + if err := boltutil.WriteExtensions(bucket, instance.Extensions); err != nil { + return err + } + + if err := boltutil.WriteAny(bucket, bucketKeySpec, instance.Spec); err != nil { + return err + } + + runtimeBucket, err := bucket.CreateBucketIfNotExists(bucketKeyRuntime) + if err != nil { + return err + } + + if err := runtimeBucket.Put(bucketKeyName, []byte(instance.Runtime.Name)); err != nil { + return err + } + + if err := boltutil.WriteAny(runtimeBucket, bucketKeyOptions, instance.Runtime.Options); err != nil { + return err + } + + return nil +} + +func (s *sandboxStore) read(parent *bbolt.Bucket, id []byte) (api.Sandbox, error) { + var ( + inst 
api.Sandbox + err error + ) + + bucket := parent.Bucket(id) + if bucket == nil { + return api.Sandbox{}, fmt.Errorf("bucket %q not found: %w", id, errdefs.ErrNotFound) + } + + inst.ID = string(id) + + inst.Labels, err = boltutil.ReadLabels(bucket) + if err != nil { + return api.Sandbox{}, err + } + + if err := boltutil.ReadTimestamps(bucket, &inst.CreatedAt, &inst.UpdatedAt); err != nil { + return api.Sandbox{}, err + } + + inst.Spec, err = boltutil.ReadAny(bucket, bucketKeySpec) + if err != nil { + return api.Sandbox{}, err + } + + runtimeBucket := bucket.Bucket(bucketKeyRuntime) + if runtimeBucket == nil { + return api.Sandbox{}, errors.New("no runtime bucket") + } + + inst.Runtime.Name = string(runtimeBucket.Get(bucketKeyName)) + inst.Runtime.Options, err = boltutil.ReadAny(runtimeBucket, bucketKeyOptions) + if err != nil { + return api.Sandbox{}, err + } + + inst.Extensions, err = boltutil.ReadExtensions(bucket) + if err != nil { + return api.Sandbox{}, err + } + + return inst, nil +} + +func (s *sandboxStore) validate(new *api.Sandbox) error { + if err := identifiers.Validate(new.ID); err != nil { + return fmt.Errorf("invalid sandbox ID: %w", err) + } + + if new.CreatedAt.IsZero() { + return fmt.Errorf("creation date must not be zero: %w", errdefs.ErrInvalidArgument) + } + + if new.UpdatedAt.IsZero() { + return fmt.Errorf("updated date must not be zero: %w", errdefs.ErrInvalidArgument) + } + + return nil +} diff --git a/vendor/github.com/containerd/containerd/metadata/snapshot.go b/vendor/github.com/containerd/containerd/metadata/snapshot.go index 348602093a616..e7774d36ec8f2 100644 --- a/vendor/github.com/containerd/containerd/metadata/snapshot.go +++ b/vendor/github.com/containerd/containerd/metadata/snapshot.go @@ -24,6 +24,7 @@ import ( "sync/atomic" "time" + eventstypes "github.com/containerd/containerd/api/events" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/filters" "github.com/containerd/containerd/labels" @@ -273,7 +274,22 @@ func (s *snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, er } func (s *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { - return s.createSnapshot(ctx, key, parent, false, opts) + mounts, err := s.createSnapshot(ctx, key, parent, false, opts) + if err != nil { + return nil, err + } + + if s.db.dbopts.publisher != nil { + if err := s.db.dbopts.publisher.Publish(ctx, "/snapshot/prepare", &eventstypes.SnapshotPrepare{ + Key: key, + Parent: parent, + Snapshotter: s.name, + }); err != nil { + return nil, err + } + } + + return mounts, nil } func (s *snapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) { @@ -618,6 +634,16 @@ func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snap return err } + if s.db.dbopts.publisher != nil { + if err := s.db.dbopts.publisher.Publish(ctx, "/snapshot/commit", &eventstypes.SnapshotCommit{ + Key: key, + Name: name, + Snapshotter: s.name, + }); err != nil { + return err + } + } + return nil } @@ -631,7 +657,7 @@ func (s *snapshotter) Remove(ctx context.Context, key string) error { return err } - return update(ctx, s.db, func(tx *bolt.Tx) error { + if err := update(ctx, s.db, func(tx *bolt.Tx) error { var sbkt *bolt.Bucket bkt := getSnapshotterBucket(tx, ns, s.name) if bkt != nil { @@ -674,7 +700,17 @@ func (s *snapshotter) Remove(ctx context.Context, key string) error { s.db.dirtySS[s.name] = struct{}{} return nil - }) + }); err != nil { + 
return err + } + + if s.db.dbopts.publisher != nil { + return s.db.dbopts.publisher.Publish(ctx, "/snapshot/remove", &eventstypes.SnapshotRemove{ + Key: key, + Snapshotter: s.name, + }) + } + return nil } type infoPair struct { diff --git a/vendor/github.com/containerd/containerd/mount/fmountat_linux.go b/vendor/github.com/containerd/containerd/mount/fmountat_linux.go deleted file mode 100644 index 850a92acf6c5c..0000000000000 --- a/vendor/github.com/containerd/containerd/mount/fmountat_linux.go +++ /dev/null @@ -1,145 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package mount - -import ( - "fmt" - "runtime" - "syscall" - "unsafe" - - "github.com/containerd/containerd/log" - "golang.org/x/sys/unix" -) - -// fMountat performs mount from the provided directory. -func fMountat(dirfd uintptr, source, target, fstype string, flags uintptr, data string) error { - var ( - sourceP, targetP, fstypeP, dataP *byte - pid uintptr - err error - errno, status syscall.Errno - ) - - sourceP, err = syscall.BytePtrFromString(source) - if err != nil { - return err - } - - targetP, err = syscall.BytePtrFromString(target) - if err != nil { - return err - } - - fstypeP, err = syscall.BytePtrFromString(fstype) - if err != nil { - return err - } - - if data != "" { - dataP, err = syscall.BytePtrFromString(data) - if err != nil { - return err - } - } - - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - var pipefds [2]int - if err := syscall.Pipe2(pipefds[:], syscall.O_CLOEXEC); err != nil { - return fmt.Errorf("failed to open pipe: %w", err) - } - - defer func() { - // close both ends of the pipe in a deferred function, since open file - // descriptor table is shared with child - syscall.Close(pipefds[0]) - syscall.Close(pipefds[1]) - }() - - pid, errno = forkAndMountat(dirfd, - uintptr(unsafe.Pointer(sourceP)), - uintptr(unsafe.Pointer(targetP)), - uintptr(unsafe.Pointer(fstypeP)), - flags, - uintptr(unsafe.Pointer(dataP)), - pipefds[1], - ) - - if errno != 0 { - return fmt.Errorf("failed to fork thread: %w", errno) - } - - defer func() { - _, err := unix.Wait4(int(pid), nil, 0, nil) - for err == syscall.EINTR { - _, err = unix.Wait4(int(pid), nil, 0, nil) - } - - if err != nil { - log.L.WithError(err).Debugf("failed to find pid=%d process", pid) - } - }() - - _, _, errno = syscall.RawSyscall(syscall.SYS_READ, - uintptr(pipefds[0]), - uintptr(unsafe.Pointer(&status)), - unsafe.Sizeof(status)) - if errno != 0 { - return fmt.Errorf("failed to read pipe: %w", errno) - } - - if status != 0 { - return fmt.Errorf("failed to mount: %w", status) - } - - return nil -} - -// forkAndMountat will fork thread, change working dir and mount. -// -// precondition: the runtime OS thread must be locked. 
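Returning to the new metadata/sandbox.go store introduced above: it is a plain bolt-backed implementation of the sandbox Store interface, namespaced like the other metadata stores. A usage sketch, assuming db is a *metadata.DB initialized as in the earlier example; the namespace, ID, labels, and runtime name are placeholders, and a real record would also carry a Spec:

```go
package example

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/metadata"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/containerd/sandbox"
)

func demoSandboxStore(ctx context.Context, db *metadata.DB) error {
	// Every sandboxStore method requires a namespace on the context.
	ctx = namespaces.WithNamespace(ctx, "default")

	store := metadata.NewSandboxStore(db)

	created, err := store.Create(ctx, sandbox.Sandbox{
		ID:      "example-sandbox",
		Labels:  map[string]string{"app": "demo"},
		Runtime: sandbox.RuntimeOpts{Name: "io.containerd.runc.v2"},
	})
	if err != nil {
		return err
	}
	fmt.Println("created", created.ID, "at", created.CreatedAt)

	// Field paths work like the other stores: only the named paths change.
	created.Labels["tier"] = "test"
	if _, err := store.Update(ctx, created, "labels.tier"); err != nil {
		return err
	}

	all, err := store.List(ctx) // no filters: return every sandbox in the namespace
	if err != nil {
		return err
	}
	fmt.Println("sandboxes:", len(all))

	return store.Delete(ctx, created.ID)
}
```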
-func forkAndMountat(dirfd uintptr, source, target, fstype, flags, data uintptr, pipefd int) (pid uintptr, errno syscall.Errno) { - - // block signal during clone - beforeFork() - - // the cloned thread shares the open file descriptor, but the thread - // never be reused by runtime. - pid, _, errno = syscall.RawSyscall6(syscall.SYS_CLONE, uintptr(syscall.SIGCHLD)|syscall.CLONE_FILES, 0, 0, 0, 0, 0) - if errno != 0 || pid != 0 { - // restore all signals - afterFork() - return - } - - // restore all signals - afterForkInChild() - - // change working dir - _, _, errno = syscall.RawSyscall(syscall.SYS_FCHDIR, dirfd, 0, 0) - if errno != 0 { - goto childerr - } - _, _, errno = syscall.RawSyscall6(syscall.SYS_MOUNT, source, target, fstype, flags, data, 0) - -childerr: - _, _, errno = syscall.RawSyscall(syscall.SYS_WRITE, uintptr(pipefd), uintptr(unsafe.Pointer(&errno)), unsafe.Sizeof(errno)) - syscall.RawSyscall(syscall.SYS_EXIT, uintptr(errno), 0, 0) - panic("unreachable") -} diff --git a/vendor/github.com/containerd/containerd/mount/lookup_unix.go b/vendor/github.com/containerd/containerd/mount/lookup_unix.go index 44881750b265f..6fb16f6dd1820 100644 --- a/vendor/github.com/containerd/containerd/mount/lookup_unix.go +++ b/vendor/github.com/containerd/containerd/mount/lookup_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/mount/lookup_unsupported.go b/vendor/github.com/containerd/containerd/mount/lookup_unsupported.go index 2e954b1ae5827..1daf96d5c93f4 100644 --- a/vendor/github.com/containerd/containerd/mount/lookup_unsupported.go +++ b/vendor/github.com/containerd/containerd/mount/lookup_unsupported.go @@ -1,5 +1,4 @@ //go:build windows -// +build windows /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/mount/losetup_linux.go b/vendor/github.com/containerd/containerd/mount/losetup_linux.go index 9a68017901a22..567ebb4e988e7 100644 --- a/vendor/github.com/containerd/containerd/mount/losetup_linux.go +++ b/vendor/github.com/containerd/containerd/mount/losetup_linux.go @@ -19,13 +19,11 @@ package mount import ( "errors" "fmt" - "math/rand" "os" "strings" - "syscall" "time" - "unsafe" + "github.com/containerd/containerd/pkg/randutil" "golang.org/x/sys/unix" ) @@ -47,22 +45,13 @@ type LoopParams struct { Direct bool } -func ioctl(fd, req, args uintptr) (uintptr, uintptr, error) { - r1, r2, errno := syscall.Syscall(syscall.SYS_IOCTL, fd, req, args) - if errno != 0 { - return 0, 0, errno - } - - return r1, r2, nil -} - func getFreeLoopDev() (uint32, error) { ctrl, err := os.OpenFile(loopControlPath, os.O_RDWR, 0) if err != nil { return 0, fmt.Errorf("could not open %v: %v", loopControlPath, err) } defer ctrl.Close() - num, _, err := ioctl(ctrl.Fd(), unix.LOOP_CTL_GET_FREE, 0) + num, err := unix.IoctlRetInt(int(ctrl.Fd()), unix.LOOP_CTL_GET_FREE) if err != nil { return 0, fmt.Errorf("could not get free loop device: %w", err) } @@ -96,7 +85,7 @@ func setupLoopDev(backingFile, loopDev string, param LoopParams) (_ *os.File, re }() // 2. 
Set FD - if _, _, err = ioctl(loop.Fd(), unix.LOOP_SET_FD, back.Fd()); err != nil { + if err := unix.IoctlSetInt(int(loop.Fd()), unix.LOOP_SET_FD, int(back.Fd())); err != nil { return nil, fmt.Errorf("could not set loop fd for device: %s: %w", loopDev, err) } @@ -115,7 +104,7 @@ func setupLoopDev(backingFile, loopDev string, param LoopParams) (_ *os.File, re info.Flags |= unix.LO_FLAGS_DIRECT_IO } - _, _, err = ioctl(loop.Fd(), unix.LOOP_SET_STATUS64, uintptr(unsafe.Pointer(&info))) + err = unix.IoctlLoopSetStatus64(int(loop.Fd()), &info) if err == nil { return loop, nil } @@ -124,13 +113,13 @@ func setupLoopDev(backingFile, loopDev string, param LoopParams) (_ *os.File, re // Retry w/o direct IO flag in case kernel does not support it. The downside is that // it will suffer from double cache problem. info.Flags &= ^(uint32(unix.LO_FLAGS_DIRECT_IO)) - _, _, err = ioctl(loop.Fd(), unix.LOOP_SET_STATUS64, uintptr(unsafe.Pointer(&info))) + err = unix.IoctlLoopSetStatus64(int(loop.Fd()), &info) if err == nil { return loop, nil } } - _, _, _ = ioctl(loop.Fd(), unix.LOOP_CLR_FD, 0) + _ = unix.IoctlSetInt(int(loop.Fd()), unix.LOOP_CLR_FD, 0) return nil, fmt.Errorf("failed to set loop device info: %v", err) } @@ -163,7 +152,7 @@ func setupLoop(backingFile string, param LoopParams) (*os.File, error) { // with EBUSY when trying to set it up. if strings.Contains(err.Error(), ebusyString) { // Fallback a bit to avoid live lock - time.Sleep(time.Millisecond * time.Duration(rand.Intn(retry*10))) + time.Sleep(time.Millisecond * time.Duration(randutil.Intn(retry*10))) continue } return nil, err @@ -182,8 +171,7 @@ func removeLoop(loopdev string) error { } defer file.Close() - _, _, err = ioctl(file.Fd(), unix.LOOP_CLR_FD, 0) - return err + return unix.IoctlSetInt(int(file.Fd()), unix.LOOP_CLR_FD, 0) } // AttachLoopDevice attaches a specified backing file to a loop device diff --git a/vendor/github.com/containerd/containerd/mount/mount.go b/vendor/github.com/containerd/containerd/mount/mount.go index b25556b2e0b83..21dd0f90376ff 100644 --- a/vendor/github.com/containerd/containerd/mount/mount.go +++ b/vendor/github.com/containerd/containerd/mount/mount.go @@ -16,6 +16,12 @@ package mount +import ( + "fmt" + + "github.com/containerd/continuity/fs" +) + // Mount is the lingua franca of containerd. A mount represents a // serialized mount syscall. Components either emit or consume mounts. type Mount struct { @@ -24,12 +30,16 @@ type Mount struct { // Source specifies where to mount from. Depending on the host system, this // can be a source path or device. Source string + // Target specifies an optional subdirectory as a mountpoint. It assumes that + // the subdirectory exists in a parent mount. + Target string // Options contains zero or more fstab-style mount options. Typically, // these are platform specific. Options []string } -// All mounts all the provided mounts to the provided target +// All mounts all the provided mounts to the provided target. If submounts are +// present, it assumes that parent mounts come before child mounts. func All(mounts []Mount, target string) error { for _, m := range mounts { if err := m.Mount(target); err != nil { @@ -38,3 +48,30 @@ func All(mounts []Mount, target string) error { } return nil } + +// UnmountMounts unmounts all the mounts under a target in the reverse order of +// the mounts array provided. 
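The new Mount.Target field and the UnmountMounts helper make submounts explicit: All expects parent mounts to come before child mounts, and UnmountMounts tears them down in reverse order. A sketch with a bind-mounted root plus a tmpfs layered onto one of its subdirectories; the paths are hypothetical and the parent rootfs is assumed to already contain ./tmp:

```go
package example

import "github.com/containerd/containerd/mount"

// mountAndCleanup shows parent-before-child ordering with the new Target field.
func mountAndCleanup(root string) error {
	mounts := []mount.Mount{
		{
			// Parent: bind mount of a prepared rootfs (assumed to contain ./tmp).
			Type:    "bind",
			Source:  "/var/lib/example/rootfs", // hypothetical path
			Options: []string{"rbind", "rw"},
		},
		{
			// Child: tmpfs layered onto the tmp subdirectory of the parent mount.
			Type:    "tmpfs",
			Source:  "tmpfs",
			Target:  "tmp",
			Options: []string{"size=64m"},
		},
	}

	if err := mount.All(mounts, root); err != nil {
		return err
	}

	// Reverse order: <root>/tmp is unmounted before <root> itself.
	return mount.UnmountMounts(mounts, root, 0)
}
```

Because each target is resolved with fs.RootPath, a Target such as "tmp" cannot escape the provided root.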
+func UnmountMounts(mounts []Mount, target string, flags int) error { + for i := len(mounts) - 1; i >= 0; i-- { + mountpoint, err := fs.RootPath(target, mounts[i].Target) + if err != nil { + return err + } + + if err := UnmountAll(mountpoint, flags); err != nil { + if i == len(mounts)-1 { // last mount + return err + } + } + } + return nil +} + +// Mount to the provided target path. +func (m *Mount) Mount(target string) error { + target, err := fs.RootPath(target, m.Target) + if err != nil { + return fmt.Errorf("failed to join path %q with root %q: %w", m.Target, target, err) + } + return m.mount(target) +} diff --git a/vendor/github.com/containerd/containerd/mount/mount_freebsd.go b/vendor/github.com/containerd/containerd/mount/mount_freebsd.go index 3711383c61344..f41df3dea4e24 100644 --- a/vendor/github.com/containerd/containerd/mount/mount_freebsd.go +++ b/vendor/github.com/containerd/containerd/mount/mount_freebsd.go @@ -31,15 +31,12 @@ var ( ErrNotImplementOnUnix = errors.New("not implemented under unix") ) -// Mount to the provided target path. -func (m *Mount) Mount(target string) error { - // The "syscall" and "golang.org/x/sys/unix" packages do not define a Mount - // function for FreeBSD, so instead we execute mount(8) and trust it to do - // the right thing - return m.mountWithHelper(target) -} - -func (m *Mount) mountWithHelper(target string) error { +// Mount to the provided target. +// +// The "syscall" and "golang.org/x/sys/unix" packages do not define a Mount +// function for FreeBSD, so instead we execute mount(8) and trust it to do +// the right thing +func (m *Mount) mount(target string) error { // target: "/foo/target" // command: "mount -o ro -t nullfs /foo/source /foo/merged" // Note: FreeBSD mount(8) is particular about the order of flags and arguments diff --git a/vendor/github.com/containerd/containerd/mount/mount_linux.go b/vendor/github.com/containerd/containerd/mount/mount_linux.go index a69f65c2ddd32..4183333dba26e 100644 --- a/vendor/github.com/containerd/containerd/mount/mount_linux.go +++ b/vendor/github.com/containerd/containerd/mount/mount_linux.go @@ -21,6 +21,7 @@ import ( "fmt" "os" "path" + "runtime" "strings" "time" @@ -41,7 +42,7 @@ func init() { // // If m.Type starts with "fuse." or "fuse3.", "mount.fuse" or "mount.fuse3" // helper binary is called. -func (m *Mount) Mount(target string) (err error) { +func (m *Mount) mount(target string) (err error) { for _, helperBinary := range allowedHelperBinaries { // helperBinary = "mount.fuse", typePrefix = "fuse." typePrefix := strings.TrimPrefix(helperBinary, "mount.") + "." @@ -363,24 +364,29 @@ func mountAt(chdir string, source, target, fstype string, flags uintptr, data st return unix.Mount(source, target, fstype, flags, data) } - f, err := os.Open(chdir) - if err != nil { - return fmt.Errorf("failed to mountat: %w", err) - } - defer f.Close() + ch := make(chan error, 1) + go func() { + runtime.LockOSThread() - fs, err := f.Stat() - if err != nil { - return fmt.Errorf("failed to mountat: %w", err) - } + // Do not unlock this thread. + // If the thread is unlocked go will try to use it for other goroutines. + // However it is not possible to restore the thread state after CLONE_FS. + // + // Once the goroutine exits the thread should eventually be terminated by go. 
- if !fs.IsDir() { - return fmt.Errorf("failed to mountat: %s is not dir", chdir) - } - if err := fMountat(f.Fd(), source, target, fstype, flags, data); err != nil { - return fmt.Errorf("failed to mountat: %w", err) - } - return nil + if err := unix.Unshare(unix.CLONE_FS); err != nil { + ch <- err + return + } + + if err := unix.Chdir(chdir); err != nil { + ch <- err + return + } + + ch <- unix.Mount(source, target, fstype, flags, data) + }() + return <-ch } func (m *Mount) mountWithHelper(helperBinary, typePrefix, target string) error { diff --git a/vendor/github.com/containerd/containerd/mount/mount_unix.go b/vendor/github.com/containerd/containerd/mount/mount_unix.go index 795bb4bfe1d0b..46dfd1c343915 100644 --- a/vendor/github.com/containerd/containerd/mount/mount_unix.go +++ b/vendor/github.com/containerd/containerd/mount/mount_unix.go @@ -1,5 +1,4 @@ -//go:build darwin || openbsd -// +build darwin openbsd +//go:build !windows && !darwin && !openbsd /* Copyright The containerd Authors. @@ -19,24 +18,44 @@ package mount -import "errors" +import ( + "sort" -var ( - // ErrNotImplementOnUnix is returned for methods that are not implemented - ErrNotImplementOnUnix = errors.New("not implemented under unix") + "github.com/moby/sys/mountinfo" ) -// Mount is not implemented on this platform -func (m *Mount) Mount(target string) error { - return ErrNotImplementOnUnix -} - -// Unmount is not implemented on this platform -func Unmount(mount string, flags int) error { - return ErrNotImplementOnUnix -} - -// UnmountAll is not implemented on this platform -func UnmountAll(mount string, flags int) error { - return ErrNotImplementOnUnix +// UnmountRecursive unmounts the target and all mounts underneath, starting +// with the deepest mount first. +func UnmountRecursive(target string, flags int) error { + if target == "" { + return nil + } + mounts, err := mountinfo.GetMounts(mountinfo.PrefixFilter(target)) + if err != nil { + return err + } + + targetSet := make(map[string]struct{}) + for _, m := range mounts { + targetSet[m.Mountpoint] = struct{}{} + } + + var targets []string + for m := range targetSet { + targets = append(targets, m) + } + + // Make the deepest mount be first + sort.SliceStable(targets, func(i, j int) bool { + return len(targets[i]) > len(targets[j]) + }) + + for i, target := range targets { + if err := UnmountAll(target, flags); err != nil { + if i == len(targets)-1 { // last mount + return err + } + } + } + return nil } diff --git a/vendor/github.com/containerd/containerd/mount/mount_unsupported.go b/vendor/github.com/containerd/containerd/mount/mount_unsupported.go new file mode 100644 index 0000000000000..894467a9933ea --- /dev/null +++ b/vendor/github.com/containerd/containerd/mount/mount_unsupported.go @@ -0,0 +1,46 @@ +//go:build darwin || openbsd + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package mount + +import "errors" + +var ( + // ErrNotImplementOnUnix is returned for methods that are not implemented + ErrNotImplementOnUnix = errors.New("not implemented under unix") +) + +// Mount is not implemented on this platform +func (m *Mount) mount(target string) error { + return ErrNotImplementOnUnix +} + +// Unmount is not implemented on this platform +func Unmount(mount string, flags int) error { + return ErrNotImplementOnUnix +} + +// UnmountAll is not implemented on this platform +func UnmountAll(mount string, flags int) error { + return ErrNotImplementOnUnix +} + +// UnmountRecursive is not implemented on this platform +func UnmountRecursive(mount string, flags int) error { + return ErrNotImplementOnUnix +} diff --git a/vendor/github.com/containerd/containerd/mount/mount_windows.go b/vendor/github.com/containerd/containerd/mount/mount_windows.go index 87fed8268ead4..b73fe3646f280 100644 --- a/vendor/github.com/containerd/containerd/mount/mount_windows.go +++ b/vendor/github.com/containerd/containerd/mount/mount_windows.go @@ -18,7 +18,6 @@ package mount import ( "encoding/json" - "errors" "fmt" "os" "path/filepath" @@ -27,13 +26,8 @@ import ( "github.com/Microsoft/hcsshim" ) -var ( - // ErrNotImplementOnWindows is returned when an action is not implemented for windows - ErrNotImplementOnWindows = errors.New("not implemented under windows") -) - -// Mount to the provided target -func (m *Mount) Mount(target string) error { +// Mount to the provided target. +func (m *Mount) mount(target string) error { if m.Type != "windows-layer" { return fmt.Errorf("invalid windows mount type: '%s'", m.Type) } @@ -64,8 +58,9 @@ func (m *Mount) Mount(target string) error { return fmt.Errorf("failed to get layer mount path for %s: %w", m.Source, err) } mountPath = mountPath + `\` + if err = os.Symlink(mountPath, target); err != nil { - return fmt.Errorf("failed to link mount to taget %s: %w", target, err) + return fmt.Errorf("failed to link mount to target %s: %w", target, err) } return nil } @@ -111,3 +106,8 @@ func Unmount(mount string, flags int) error { func UnmountAll(mount string, flags int) error { return Unmount(mount, flags) } + +// UnmountRecursive unmounts from the provided path +func UnmountRecursive(mount string, flags int) error { + return UnmountAll(mount, flags) +} diff --git a/vendor/github.com/containerd/containerd/mount/temp.go b/vendor/github.com/containerd/containerd/mount/temp.go index 13eedaf035994..349c2404e084c 100644 --- a/vendor/github.com/containerd/containerd/mount/temp.go +++ b/vendor/github.com/containerd/containerd/mount/temp.go @@ -49,7 +49,7 @@ func WithTempMount(ctx context.Context, mounts []Mount, f func(root string) erro // We should do defer first, if not we will not do Unmount when only a part of Mounts are failed. defer func() { - if uerr = UnmountAll(root, 0); uerr != nil { + if uerr = UnmountMounts(mounts, root, 0); uerr != nil { uerr = fmt.Errorf("failed to unmount %s: %w", root, uerr) if err == nil { err = uerr diff --git a/vendor/github.com/containerd/containerd/mount/temp_unix.go b/vendor/github.com/containerd/containerd/mount/temp_unix.go index e969700818c6a..5ddd2cd1609ff 100644 --- a/vendor/github.com/containerd/containerd/mount/temp_unix.go +++ b/vendor/github.com/containerd/containerd/mount/temp_unix.go @@ -1,5 +1,4 @@ //go:build !windows && !darwin -// +build !windows,!darwin /* Copyright The containerd Authors. 
diff --git a/vendor/github.com/containerd/containerd/mount/temp_unsupported.go b/vendor/github.com/containerd/containerd/mount/temp_unsupported.go index feec90a76f6b4..3ccc0444ff916 100644 --- a/vendor/github.com/containerd/containerd/mount/temp_unsupported.go +++ b/vendor/github.com/containerd/containerd/mount/temp_unsupported.go @@ -1,5 +1,4 @@ //go:build windows || darwin -// +build windows darwin /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/namespaces.go b/vendor/github.com/containerd/containerd/namespaces.go index 4c66406b084ea..83ee828dd0e3f 100644 --- a/vendor/github.com/containerd/containerd/namespaces.go +++ b/vendor/github.com/containerd/containerd/namespaces.go @@ -23,7 +23,7 @@ import ( api "github.com/containerd/containerd/api/services/namespaces/v1" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/namespaces" - "github.com/gogo/protobuf/types" + "github.com/containerd/containerd/protobuf/types" ) // NewNamespaceStoreFromClient returns a new namespace store @@ -38,7 +38,7 @@ type remoteNamespaces struct { func (r *remoteNamespaces) Create(ctx context.Context, namespace string, labels map[string]string) error { var req api.CreateNamespaceRequest - req.Namespace = api.Namespace{ + req.Namespace = &api.Namespace{ Name: namespace, Labels: labels, } @@ -66,7 +66,7 @@ func (r *remoteNamespaces) Labels(ctx context.Context, namespace string) (map[st func (r *remoteNamespaces) SetLabel(ctx context.Context, namespace, key, value string) error { var req api.UpdateNamespaceRequest - req.Namespace = api.Namespace{ + req.Namespace = &api.Namespace{ Name: namespace, Labels: map[string]string{key: value}, } diff --git a/vendor/github.com/containerd/containerd/oci/mounts.go b/vendor/github.com/containerd/containerd/oci/mounts.go index 83dd0d0b10843..8c758f43332b2 100644 --- a/vendor/github.com/containerd/containerd/oci/mounts.go +++ b/vendor/github.com/containerd/containerd/oci/mounts.go @@ -1,5 +1,4 @@ //go:build !freebsd -// +build !freebsd /* Copyright The containerd Authors. @@ -69,3 +68,6 @@ func defaultMounts() []specs.Mount { }, } } + +// appendOSMounts is only used on FreeBSD, and a no-op on other platforms. +func appendOSMounts(_ *Spec, _ string) {} diff --git a/vendor/github.com/containerd/containerd/oci/mounts_freebsd.go b/vendor/github.com/containerd/containerd/oci/mounts_freebsd.go index 42b9d7affd7e1..6675c55161edd 100644 --- a/vendor/github.com/containerd/containerd/oci/mounts_freebsd.go +++ b/vendor/github.com/containerd/containerd/oci/mounts_freebsd.go @@ -32,7 +32,34 @@ func defaultMounts() []specs.Mount { Destination: "/dev/fd", Type: "fdescfs", Source: "fdescfs", - Options: []string{}, }, } } + +// appendOSMounts modifies the mount spec to mount emulated Linux filesystems on FreeBSD, +// as per: https://wiki.freebsd.org/LinuxJails +func appendOSMounts(s *Spec, os string) { + // No-op for FreeBSD containers + if os != "linux" { + return + } + /* The nosuid noexec options are for consistency with Linux mounts: on FreeBSD it is + by default impossible to execute anything from these filesystems. + */ + var mounts = []specs.Mount{ + { + Destination: "/proc", + Type: "linprocfs", + Source: "linprocfs", + Options: []string{"nosuid", "noexec"}, + }, + { + Destination: "/sys", + Type: "linsysfs", + Source: "linsysfs", + Options: []string{"nosuid", "noexec", "nodev"}, + }, + } + + s.Mounts = append(mounts, s.Mounts...) 
+} diff --git a/vendor/github.com/containerd/containerd/oci/spec.go b/vendor/github.com/containerd/containerd/oci/spec.go index a1c98ddcbd062..bee3b44d6af03 100644 --- a/vendor/github.com/containerd/containerd/oci/spec.go +++ b/vendor/github.com/containerd/containerd/oci/spec.go @@ -21,11 +21,11 @@ import ( "path/filepath" "runtime" - "github.com/containerd/containerd/namespaces" - "github.com/containerd/containerd/platforms" + "github.com/opencontainers/runtime-spec/specs-go" "github.com/containerd/containerd/containers" - specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/platforms" ) const ( @@ -66,15 +66,19 @@ func generateDefaultSpecWithPlatform(ctx context.Context, platform, id string, s return err } - if plat.OS == "windows" { + switch plat.OS { + case "windows": err = populateDefaultWindowsSpec(ctx, s, id) - } else { + case "darwin": + err = populateDefaultDarwinSpec(s) + default: err = populateDefaultUnixSpec(ctx, s, id) if err == nil && runtime.GOOS == "windows" { // To run LCOW we have a Linux and Windows section. Add an empty one now. s.Windows = &specs.Windows{} } } + return err } @@ -207,3 +211,12 @@ func populateDefaultWindowsSpec(ctx context.Context, s *Spec, id string) error { } return nil } + +func populateDefaultDarwinSpec(s *Spec) error { + *s = Spec{ + Version: specs.Version, + Root: &specs.Root{}, + Process: &specs.Process{Cwd: "/"}, + } + return nil +} diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts.go b/vendor/github.com/containerd/containerd/oci/spec_opts.go index 3330ad1088ca9..00942995b69e8 100644 --- a/vendor/github.com/containerd/containerd/oci/spec_opts.go +++ b/vendor/github.com/containerd/containerd/oci/spec_opts.go @@ -76,13 +76,16 @@ func setLinux(s *Spec) { } } -// nolint func setResources(s *Spec) { if s.Linux != nil { if s.Linux.Resources == nil { s.Linux.Resources = &specs.LinuxResources{} } } +} + +//nolint:nolintlint,unused // not used on all platforms +func setResourcesWindows(s *Spec) { if s.Windows != nil { if s.Windows.Resources == nil { s.Windows.Resources = &specs.WindowsResources{} @@ -90,7 +93,7 @@ func setResources(s *Spec) { } } -// nolint +//nolint:nolintlint,unused // not used on all platforms func setCPU(s *Spec) { setResources(s) if s.Linux != nil { @@ -98,6 +101,11 @@ func setCPU(s *Spec) { s.Linux.Resources.CPU = &specs.LinuxCPU{} } } +} + +//nolint:nolintlint,unused // not used on all platforms +func setCPUWindows(s *Spec) { + setResourcesWindows(s) if s.Windows != nil { if s.Windows.Resources.CPU == nil { s.Windows.Resources.CPU = &specs.WindowsCPUResources{} @@ -190,23 +198,23 @@ func replaceOrAppendEnvValues(defaults, overrides []string) []string { cache := make(map[string]int, len(defaults)) results := make([]string, 0, len(defaults)) for i, e := range defaults { - parts := strings.SplitN(e, "=", 2) + k, _, _ := strings.Cut(e, "=") results = append(results, e) - cache[parts[0]] = i + cache[k] = i } for _, value := range overrides { // Values w/o = means they want this env to be removed/unset. 
- if !strings.Contains(value, "=") { - if i, exists := cache[value]; exists { + k, _, ok := strings.Cut(value, "=") + if !ok { + if i, exists := cache[k]; exists { results[i] = "" // Used to indicate it should be removed } continue } // Just do a normal set/update - parts := strings.SplitN(value, "=", 2) - if i, exists := cache[parts[0]]; exists { + if i, exists := cache[k]; exists { results[i] = value } else { results = append(results, value) @@ -229,6 +237,7 @@ func WithProcessArgs(args ...string) SpecOpts { return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { setProcess(s) s.Process.Args = args + s.Process.CommandLine = "" return nil } } @@ -276,6 +285,14 @@ func WithHostname(name string) SpecOpts { } } +// WithDomainname sets the container's NIS domain name +func WithDomainname(name string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + s.Domainname = name + return nil + } +} + // WithMounts appends mounts func WithMounts(mounts []specs.Mount) SpecOpts { return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { @@ -358,17 +375,19 @@ func WithImageConfigArgs(image Image, args []string) SpecOpts { return err } var ( - ociimage v1.Image - config v1.ImageConfig + imageConfigBytes []byte + ociimage v1.Image + config v1.ImageConfig ) switch ic.MediaType { case v1.MediaTypeImageConfig, images.MediaTypeDockerSchema2Config: - p, err := content.ReadBlob(ctx, image.ContentStore(), ic) + var err error + imageConfigBytes, err = content.ReadBlob(ctx, image.ContentStore(), ic) if err != nil { return err } - if err := json.Unmarshal(p, &ociimage); err != nil { + if err := json.Unmarshal(imageConfigBytes, &ociimage); err != nil { return err } config = ociimage.Config @@ -376,6 +395,7 @@ func WithImageConfigArgs(image Image, args []string) SpecOpts { return fmt.Errorf("unknown image config media type %s", ic.MediaType) } + appendOSMounts(s, ociimage.OS) setProcess(s) if s.Linux != nil { defaults := config.Env @@ -405,11 +425,55 @@ func WithImageConfigArgs(image Image, args []string) SpecOpts { return WithAdditionalGIDs("root")(ctx, client, c, s) } else if s.Windows != nil { s.Process.Env = replaceOrAppendEnvValues(config.Env, s.Process.Env) + + // To support Docker ArgsEscaped on Windows we need to combine the + // image Entrypoint & (Cmd Or User Args) while taking into account + // if Docker has already escaped them in the image config. When + // Docker sets `ArgsEscaped==true` in the config it has pre-escaped + // either Entrypoint or Cmd or both. Cmd should always be treated as + // arguments appended to Entrypoint unless: + // + // 1. Entrypoint does not exist, in which case Cmd[0] is the + // executable. + // + // 2. The user overrides the Cmd with User Args when activating the + // container in which case those args should be appended to the + // Entrypoint if it exists. + // + // To effectively do this we need to know if the arguments came from + // the user or if the arguments came from the image config when + // ArgsEscaped==true. In this case we only want to escape the + // additional user args when forming the complete CommandLine. This + // is safe in both cases of Entrypoint or Cmd being set because + // Docker will always escape them to an array of length one. Thus in + // both cases it is the "executable" portion of the command. 
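Earlier in this hunk, replaceOrAppendEnvValues is rewritten around strings.Cut. A standalone sketch of the merge semantics using only the standard library; the mergeEnv name and the final filtering pass (dropping the entries the hunk marks with empty strings) are illustrative:

package main

import (
	"fmt"
	"strings"
)

// mergeEnv: defaults are indexed by key; overrides with KEY=VALUE replace a
// matching default, bare KEY entries unset it, and unknown keys are appended.
func mergeEnv(defaults, overrides []string) []string {
	cache := make(map[string]int, len(defaults))
	results := make([]string, 0, len(defaults))
	for i, e := range defaults {
		k, _, _ := strings.Cut(e, "=")
		results = append(results, e)
		cache[k] = i
	}
	for _, value := range overrides {
		k, _, ok := strings.Cut(value, "=")
		if !ok { // bare key: mark the default for removal
			if i, exists := cache[k]; exists {
				results[i] = ""
			}
			continue
		}
		if i, exists := cache[k]; exists {
			results[i] = value
		} else {
			results = append(results, value)
		}
	}
	// Drop the entries marked for removal.
	out := results[:0]
	for _, e := range results {
		if e != "" {
			out = append(out, e)
		}
	}
	return out
}

func main() {
	defaults := []string{"PATH=/usr/bin", "TERM=xterm", "LANG=C"}
	overrides := []string{"TERM", "LANG=en_US.UTF-8", "HOME=/root"}
	fmt.Println(mergeEnv(defaults, overrides))
	// [PATH=/usr/bin LANG=en_US.UTF-8 HOME=/root]
}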
+ // + // In the case ArgsEscaped==false, Entrypoint or Cmd will contain + // any number of entries that are all unescaped and can simply be + // combined (potentially overwriting Cmd with User Args if present) + // and forwarded the container start as an Args array. cmd := config.Cmd + cmdFromImage := true if len(args) > 0 { cmd = args + cmdFromImage = false + } + + cmd = append(config.Entrypoint, cmd...) + if len(cmd) == 0 { + return errors.New("no arguments specified") + } + + if config.ArgsEscaped && (len(config.Entrypoint) > 0 || cmdFromImage) { + s.Process.Args = nil + s.Process.CommandLine = cmd[0] + if len(cmd) > 1 { + s.Process.CommandLine += " " + escapeAndCombineArgs(cmd[1:]) + } + } else { + s.Process.Args = cmd + s.Process.CommandLine = "" } - s.Process.Args = append(config.Entrypoint, cmd...) s.Process.Cwd = config.WorkingDir s.Process.User = specs.User{ @@ -1273,12 +1337,28 @@ func WithLinuxDevices(devices []specs.LinuxDevice) SpecOpts { } } +func WithLinuxDeviceFollowSymlinks(path, permissions string) SpecOpts { + return withLinuxDevice(path, permissions, true) +} + // WithLinuxDevice adds the device specified by path to the spec func WithLinuxDevice(path, permissions string) SpecOpts { + return withLinuxDevice(path, permissions, false) +} + +func withLinuxDevice(path, permissions string, followSymlinks bool) SpecOpts { return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { setLinux(s) setResources(s) + if followSymlinks { + resolvedPath, err := filepath.EvalSymlinks(path) + if err != nil { + return err + } + path = resolvedPath + } + dev, err := DeviceFromPath(path) if err != nil { return err @@ -1361,3 +1441,184 @@ func tryReadonlyMounts(mounts []mount.Mount) []mount.Mount { } return mounts } + +// WithWindowsDevice adds a device exposed to a Windows (WCOW or LCOW) Container +func WithWindowsDevice(idType, id string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + if idType == "" { + return errors.New("missing idType") + } + if s.Windows == nil { + s.Windows = &specs.Windows{} + } + s.Windows.Devices = append(s.Windows.Devices, specs.WindowsDevice{IDType: idType, ID: id}) + return nil + } +} + +// WithMemorySwap sets the container's swap in bytes +func WithMemorySwap(swap int64) SpecOpts { + return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { + setResources(s) + if s.Linux.Resources.Memory == nil { + s.Linux.Resources.Memory = &specs.LinuxMemory{} + } + s.Linux.Resources.Memory.Swap = &swap + return nil + } +} + +// WithPidsLimit sets the container's pid limit or maximum +func WithPidsLimit(limit int64) SpecOpts { + return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { + setResources(s) + if s.Linux.Resources.Pids == nil { + s.Linux.Resources.Pids = &specs.LinuxPids{} + } + s.Linux.Resources.Pids.Limit = limit + return nil + } +} + +// WithBlockIO sets the container's blkio parameters +func WithBlockIO(blockio *specs.LinuxBlockIO) SpecOpts { + return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { + setResources(s) + s.Linux.Resources.BlockIO = blockio + return nil + } +} + +// WithCPUShares sets the container's cpu shares +func WithCPUShares(shares uint64) SpecOpts { + return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { + setCPU(s) + s.Linux.Resources.CPU.Shares = &shares + return nil + } +} + +// WithCPUs sets the container's cpus/cores for use by the container +func 
WithCPUs(cpus string) SpecOpts { + return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { + setCPU(s) + s.Linux.Resources.CPU.Cpus = cpus + return nil + } +} + +// WithCPUsMems sets the container's cpu mems for use by the container +func WithCPUsMems(mems string) SpecOpts { + return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { + setCPU(s) + s.Linux.Resources.CPU.Mems = mems + return nil + } +} + +// WithCPUCFS sets the container's Completely fair scheduling (CFS) quota and period +func WithCPUCFS(quota int64, period uint64) SpecOpts { + return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { + setCPU(s) + s.Linux.Resources.CPU.Quota = "a + s.Linux.Resources.CPU.Period = &period + return nil + } +} + +// WithCPURT sets the container's realtime scheduling (RT) runtime and period. +func WithCPURT(runtime int64, period uint64) SpecOpts { + return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { + setCPU(s) + s.Linux.Resources.CPU.RealtimeRuntime = &runtime + s.Linux.Resources.CPU.RealtimePeriod = &period + return nil + } +} + +// WithoutRunMount removes the `/run` inside the spec +func WithoutRunMount(ctx context.Context, client Client, c *containers.Container, s *Spec) error { + return WithoutMounts("/run")(ctx, client, c, s) +} + +// WithRdt sets the container's RDT parameters +func WithRdt(closID, l3CacheSchema, memBwSchema string) SpecOpts { + return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { + s.Linux.IntelRdt = &specs.LinuxIntelRdt{ + ClosID: closID, + L3CacheSchema: l3CacheSchema, + MemBwSchema: memBwSchema, + } + return nil + } +} + +// WithWindowsCPUCount sets the `Windows.Resources.CPU.Count` section to the +// `count` specified. +func WithWindowsCPUCount(count uint64) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setCPUWindows(s) + s.Windows.Resources.CPU.Count = &count + return nil + } +} + +// WithWindowsCPUShares sets the `Windows.Resources.CPU.Shares` section to the +// `shares` specified. +func WithWindowsCPUShares(shares uint16) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setCPUWindows(s) + s.Windows.Resources.CPU.Shares = &shares + return nil + } +} + +// WithWindowsCPUMaximum sets the `Windows.Resources.CPU.Maximum` section to the +// `max` specified. +func WithWindowsCPUMaximum(max uint16) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setCPUWindows(s) + s.Windows.Resources.CPU.Maximum = &max + return nil + } +} + +// WithWindowsIgnoreFlushesDuringBoot sets `Windows.IgnoreFlushesDuringBoot`. +func WithWindowsIgnoreFlushesDuringBoot() SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + if s.Windows == nil { + s.Windows = &specs.Windows{} + } + s.Windows.IgnoreFlushesDuringBoot = true + return nil + } +} + +// WithWindowNetworksAllowUnqualifiedDNSQuery sets `Windows.Network.AllowUnqualifiedDNSQuery`. 
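The resource helpers above (WithCPUCFS, WithCPURT, WithMemorySwap, WithPidsLimit, the Windows CPU variants) are ordinary SpecOpts, so they compose like any other spec mutation. A short sketch of applying a few of them, assuming containerd's existing oci.GenerateSpec helper (not shown in this hunk), a Linux daemon host so the spec has a Linux section, and an arbitrary container ID and limits:

package main

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/containerd/containerd/containers"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/containerd/oci"
)

func main() {
	// GenerateSpec needs a namespace in the context for the default cgroups path.
	ctx := namespaces.WithNamespace(context.Background(), "default")

	// A default spec is built for the current platform, then each SpecOpts is
	// applied in order; these helpers only touch the resource sections they own
	// and do not use the client, so nil is fine here.
	spec, err := oci.GenerateSpec(ctx, nil, &containers.Container{ID: "example"},
		oci.WithCPUCFS(50000, 100000), // quota, period
		oci.WithMemorySwap(256<<20),   // bytes
		oci.WithPidsLimit(512),
	)
	if err != nil {
		panic(err)
	}

	out, _ := json.MarshalIndent(spec.Linux.Resources, "", "  ")
	fmt.Println(string(out))
}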
+func WithWindowNetworksAllowUnqualifiedDNSQuery() SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + if s.Windows == nil { + s.Windows = &specs.Windows{} + } + if s.Windows.Network == nil { + s.Windows.Network = &specs.WindowsNetwork{} + } + + s.Windows.Network.AllowUnqualifiedDNSQuery = true + return nil + } +} + +// WithWindowsNetworkNamespace sets the network namespace for a Windows container. +func WithWindowsNetworkNamespace(ns string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + if s.Windows == nil { + s.Windows = &specs.Windows{} + } + if s.Windows.Network == nil { + s.Windows.Network = &specs.WindowsNetwork{} + } + s.Windows.Network.NetworkNamespace = ns + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_linux.go b/vendor/github.com/containerd/containerd/oci/spec_opts_linux.go index 4d8841ee11505..bccea766aa182 100644 --- a/vendor/github.com/containerd/containerd/oci/spec_opts_linux.go +++ b/vendor/github.com/containerd/containerd/oci/spec_opts_linux.go @@ -60,67 +60,6 @@ func WithDevices(devicePath, containerPath, permissions string) SpecOpts { } } -// WithMemorySwap sets the container's swap in bytes -func WithMemorySwap(swap int64) SpecOpts { - return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { - setResources(s) - if s.Linux.Resources.Memory == nil { - s.Linux.Resources.Memory = &specs.LinuxMemory{} - } - s.Linux.Resources.Memory.Swap = &swap - return nil - } -} - -// WithPidsLimit sets the container's pid limit or maximum -func WithPidsLimit(limit int64) SpecOpts { - return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { - setResources(s) - if s.Linux.Resources.Pids == nil { - s.Linux.Resources.Pids = &specs.LinuxPids{} - } - s.Linux.Resources.Pids.Limit = limit - return nil - } -} - -// WithCPUShares sets the container's cpu shares -func WithCPUShares(shares uint64) SpecOpts { - return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { - setCPU(s) - s.Linux.Resources.CPU.Shares = &shares - return nil - } -} - -// WithCPUs sets the container's cpus/cores for use by the container -func WithCPUs(cpus string) SpecOpts { - return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { - setCPU(s) - s.Linux.Resources.CPU.Cpus = cpus - return nil - } -} - -// WithCPUsMems sets the container's cpu mems for use by the container -func WithCPUsMems(mems string) SpecOpts { - return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { - setCPU(s) - s.Linux.Resources.CPU.Mems = mems - return nil - } -} - -// WithCPUCFS sets the container's Completely fair scheduling (CFS) quota and period -func WithCPUCFS(quota int64, period uint64) SpecOpts { - return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { - setCPU(s) - s.Linux.Resources.CPU.Quota = "a - s.Linux.Resources.CPU.Period = &period - return nil - } -} - // WithAllCurrentCapabilities propagates the effective capabilities of the caller process to the container process. // The capability set may differ from WithAllKnownCapabilities when running in a container. 
var WithAllCurrentCapabilities = func(ctx context.Context, client Client, c *containers.Container, s *Spec) error { @@ -131,25 +70,12 @@ var WithAllCurrentCapabilities = func(ctx context.Context, client Client, c *con return WithCapabilities(caps)(ctx, client, c, s) } -// WithAllKnownCapabilities sets all the the known linux capabilities for the container process +// WithAllKnownCapabilities sets all the known linux capabilities for the container process var WithAllKnownCapabilities = func(ctx context.Context, client Client, c *containers.Container, s *Spec) error { caps := cap.Known() return WithCapabilities(caps)(ctx, client, c, s) } -// WithoutRunMount removes the `/run` inside the spec -func WithoutRunMount(ctx context.Context, client Client, c *containers.Container, s *Spec) error { - return WithoutMounts("/run")(ctx, client, c, s) -} - -// WithRdt sets the container's RDT parameters -func WithRdt(closID, l3CacheSchema, memBwSchema string) SpecOpts { - return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { - s.Linux.IntelRdt = &specs.LinuxIntelRdt{ - ClosID: closID, - L3CacheSchema: l3CacheSchema, - MemBwSchema: memBwSchema, - } - return nil - } +func escapeAndCombineArgs(args []string) string { + panic("not supported") } diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_nonlinux.go b/vendor/github.com/containerd/containerd/oci/spec_opts_nonlinux.go index ec9149279851b..b2f796f3613d1 100644 --- a/vendor/github.com/containerd/containerd/oci/spec_opts_nonlinux.go +++ b/vendor/github.com/containerd/containerd/oci/spec_opts_nonlinux.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux /* Copyright The containerd Authors. @@ -21,38 +20,17 @@ package oci import ( "context" - "errors" "github.com/containerd/containerd/containers" ) // WithAllCurrentCapabilities propagates the effective capabilities of the caller process to the container process. // The capability set may differ from WithAllKnownCapabilities when running in a container. -// -//nolint:deadcode,unused var WithAllCurrentCapabilities = func(ctx context.Context, client Client, c *containers.Container, s *Spec) error { return WithCapabilities(nil)(ctx, client, c, s) } -// WithAllKnownCapabilities sets all the the known linux capabilities for the container process -// -//nolint:deadcode,unused +// WithAllKnownCapabilities sets all the known linux capabilities for the container process var WithAllKnownCapabilities = func(ctx context.Context, client Client, c *containers.Container, s *Spec) error { return WithCapabilities(nil)(ctx, client, c, s) } - -// WithCPUShares sets the container's cpu shares -// -//nolint:deadcode,unused -func WithCPUShares(shares uint64) SpecOpts { - return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { - return nil - } -} - -// WithRdt sets the container's RDT parameters -func WithRdt(closID, l3CacheSchema, memBwSchema string) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, _ *Spec) error { - return errors.New("RDT not supported") - } -} diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go b/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go index 9d03091aa8104..f4135285ccb63 100644 --- a/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go +++ b/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go @@ -1,5 +1,4 @@ //go:build !linux && !windows -// +build !linux,!windows /* Copyright The containerd Authors. 
@@ -51,9 +50,6 @@ func WithDevices(devicePath, containerPath, permissions string) SpecOpts { } } -// WithCPUCFS sets the container's Completely fair scheduling (CFS) quota and period -func WithCPUCFS(quota int64, period uint64) SpecOpts { - return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { - return nil - } +func escapeAndCombineArgs(args []string) string { + panic("not supported") } diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go b/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go index 5502257a48ac2..7624daf11214c 100644 --- a/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go +++ b/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go @@ -19,48 +19,28 @@ package oci import ( "context" "errors" + "strings" + + "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/sys/windows" "github.com/containerd/containerd/containers" - specs "github.com/opencontainers/runtime-spec/specs-go" ) -// WithWindowsCPUCount sets the `Windows.Resources.CPU.Count` section to the -// `count` specified. -func WithWindowsCPUCount(count uint64) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - if s.Windows.Resources == nil { - s.Windows.Resources = &specs.WindowsResources{} - } - if s.Windows.Resources.CPU == nil { - s.Windows.Resources.CPU = &specs.WindowsCPUResources{} - } - s.Windows.Resources.CPU.Count = &count - return nil - } -} - -// WithWindowsIgnoreFlushesDuringBoot sets `Windows.IgnoreFlushesDuringBoot`. -func WithWindowsIgnoreFlushesDuringBoot() SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - if s.Windows == nil { - s.Windows = &specs.Windows{} - } - s.Windows.IgnoreFlushesDuringBoot = true - return nil +func escapeAndCombineArgs(args []string) string { + escaped := make([]string, len(args)) + for i, a := range args { + escaped[i] = windows.EscapeArg(a) } + return strings.Join(escaped, " ") } -// WithWindowNetworksAllowUnqualifiedDNSQuery sets `Windows.Network.AllowUnqualifiedDNSQuery`. -func WithWindowNetworksAllowUnqualifiedDNSQuery() SpecOpts { +// WithProcessCommandLine replaces the command line on the generated spec +func WithProcessCommandLine(cmdLine string) SpecOpts { return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - if s.Windows == nil { - s.Windows = &specs.Windows{} - } - if s.Windows.Network == nil { - s.Windows.Network = &specs.WindowsNetwork{} - } - - s.Windows.Network.AllowUnqualifiedDNSQuery = true + setProcess(s) + s.Process.Args = nil + s.Process.CommandLine = cmdLine return nil } } @@ -76,16 +56,9 @@ func DeviceFromPath(path string) (*specs.LinuxDevice, error) { return nil, errors.New("device from path not supported on Windows") } -// WithWindowsNetworkNamespace sets the network namespace for a Windows container. -func WithWindowsNetworkNamespace(ns string) SpecOpts { - return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { - if s.Windows == nil { - s.Windows = &specs.Windows{} - } - if s.Windows.Network == nil { - s.Windows.Network = &specs.WindowsNetwork{} - } - s.Windows.Network.NetworkNamespace = ns +// WithDevices does nothing on Windows. 
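On Windows, escapeAndCombineArgs above relies on golang.org/x/sys/windows.EscapeArg to apply cmd-style quoting before the pieces are joined into Process.CommandLine. A Windows-only sketch of that joining step; the sample arguments are arbitrary:

//go:build windows

package main

import (
	"fmt"
	"strings"

	"golang.org/x/sys/windows"
)

func main() {
	args := []string{`C:\Program Files\app\app.exe`, `--name`, `hello world`}

	// Escape each argument, then join with single spaces to form the command line,
	// mirroring the escapeAndCombineArgs helper in the hunk above.
	escaped := make([]string, len(args))
	for i, a := range args {
		escaped[i] = windows.EscapeArg(a)
	}
	cmdLine := strings.Join(escaped, " ")

	fmt.Println(cmdLine)
	// Arguments containing spaces come back quoted, e.g. "hello world".
}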
+func WithDevices(devicePath, containerPath, permissions string) SpecOpts { + return func(ctx context.Context, client Client, container *containers.Container, spec *Spec) error { return nil } } diff --git a/vendor/github.com/containerd/containerd/oci/utils_unix.go b/vendor/github.com/containerd/containerd/oci/utils_unix.go index db75b0bade0a1..b3cb8a600d904 100644 --- a/vendor/github.com/containerd/containerd/oci/utils_unix.go +++ b/vendor/github.com/containerd/containerd/oci/utils_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows /* Copyright The containerd Authors. @@ -127,7 +126,7 @@ func getDevices(path, containerPath string) ([]specs.LinuxDevice, error) { // TODO consider adding these consts to the OCI runtime-spec. const ( - wildcardDevice = "a" //nolint // currently unused, but should be included when upstreaming to OCI runtime-spec. + wildcardDevice = "a" //nolint:nolintlint,unused,varcheck // currently unused, but should be included when upstreaming to OCI runtime-spec. blockDevice = "b" charDevice = "c" // or "u" fifoDevice = "p" @@ -148,7 +147,7 @@ func DeviceFromPath(path string) (*specs.LinuxDevice, error) { } var ( - devNumber = uint64(stat.Rdev) //nolint: unconvert // the type is 32bit on mips. + devNumber = uint64(stat.Rdev) //nolint:nolintlint,unconvert // the type is 32bit on mips. major = unix.Major(devNumber) minor = unix.Minor(devNumber) ) diff --git a/vendor/github.com/containerd/containerd/oss_fuzz.go b/vendor/github.com/containerd/containerd/oss_fuzz.go new file mode 100644 index 0000000000000..8d1def4f09c3c --- /dev/null +++ b/vendor/github.com/containerd/containerd/oss_fuzz.go @@ -0,0 +1,26 @@ +//go:build gofuzz + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package containerd + +import ( + "github.com/AdamKorcz/go-118-fuzz-build/testing" +) + +// To keep this package in go.mod. +var _ = testing.F{} diff --git a/vendor/github.com/containerd/containerd/pkg/apparmor/apparmor_unsupported.go b/vendor/github.com/containerd/containerd/pkg/apparmor/apparmor_unsupported.go index 833170338ec61..3c3c5beb67480 100644 --- a/vendor/github.com/containerd/containerd/pkg/apparmor/apparmor_unsupported.go +++ b/vendor/github.com/containerd/containerd/pkg/apparmor/apparmor_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux /* Copyright The containerd Authors. 
diff --git a/vendor/github.com/containerd/containerd/pkg/cap/cap_linux.go b/vendor/github.com/containerd/containerd/pkg/cap/cap_linux.go index 26212573dc723..63fa104fb4383 100644 --- a/vendor/github.com/containerd/containerd/pkg/cap/cap_linux.go +++ b/vendor/github.com/containerd/containerd/pkg/cap/cap_linux.go @@ -80,15 +80,14 @@ func ParseProcPIDStatus(r io.Reader) (map[Type]uint64, error) { scanner := bufio.NewScanner(r) for scanner.Scan() { line := scanner.Text() - pair := strings.SplitN(line, ":", 2) - if len(pair) != 2 { + k, v, ok := strings.Cut(line, ":") + if !ok { continue } - k := strings.TrimSpace(pair[0]) - v := strings.TrimSpace(pair[1]) + k = strings.TrimSpace(k) switch k { case "CapInh", "CapPrm", "CapEff", "CapBnd", "CapAmb": - ui64, err := strconv.ParseUint(v, 16, 64) + ui64, err := strconv.ParseUint(strings.TrimSpace(v), 16, 64) if err != nil { return nil, fmt.Errorf("failed to parse line %q", line) } diff --git a/vendor/github.com/containerd/containerd/pkg/cleanup/context.go b/vendor/github.com/containerd/containerd/pkg/cleanup/context.go new file mode 100644 index 0000000000000..62741c4ca0a96 --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/cleanup/context.go @@ -0,0 +1,52 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package providing utilities to help cleanup +package cleanup + +import ( + "context" + "time" +) + +type clearCancel struct { + context.Context +} + +func (cc clearCancel) Deadline() (deadline time.Time, ok bool) { + return +} + +func (cc clearCancel) Done() <-chan struct{} { + return nil +} + +func (cc clearCancel) Err() error { + return nil +} + +// Background creates a new context which clears out the parent errors +func Background(ctx context.Context) context.Context { + return clearCancel{ctx} +} + +// Do runs the provided function with a context in which the +// errors are cleared out and will timeout after 10 seconds. +func Do(ctx context.Context, do func(context.Context)) { + ctx, cancel := context.WithTimeout(clearCancel{ctx}, 10*time.Second) + do(ctx) + cancel() +} diff --git a/vendor/github.com/containerd/containerd/pkg/dialer/dialer_unix.go b/vendor/github.com/containerd/containerd/pkg/dialer/dialer_unix.go index b4304ffbf1f20..798566bc5dacd 100644 --- a/vendor/github.com/containerd/containerd/pkg/dialer/dialer_unix.go +++ b/vendor/github.com/containerd/containerd/pkg/dialer/dialer_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/pkg/epoch/context.go b/vendor/github.com/containerd/containerd/pkg/epoch/context.go new file mode 100644 index 0000000000000..fd16f951969fe --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/epoch/context.go @@ -0,0 +1,41 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package epoch + +import ( + "context" + "time" +) + +type ( + epochKey struct{} +) + +// WithSourceDateEpoch associates the context with the epoch. +func WithSourceDateEpoch(ctx context.Context, tm *time.Time) context.Context { + return context.WithValue(ctx, epochKey{}, tm) +} + +// FromContext returns the epoch associated with the context. +// FromContext does not fall back to read the SOURCE_DATE_EPOCH env var. +func FromContext(ctx context.Context) *time.Time { + v := ctx.Value(epochKey{}) + if v == nil { + return nil + } + return v.(*time.Time) +} diff --git a/vendor/github.com/containerd/containerd/pkg/epoch/epoch.go b/vendor/github.com/containerd/containerd/pkg/epoch/epoch.go new file mode 100644 index 0000000000000..124e8edb507ab --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/epoch/epoch.go @@ -0,0 +1,69 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package epoch provides SOURCE_DATE_EPOCH utilities. +package epoch + +import ( + "fmt" + "os" + "strconv" + "time" + + "github.com/sirupsen/logrus" +) + +// SourceDateEpochEnv is the SOURCE_DATE_EPOCH env var. +// See https://reproducible-builds.org/docs/source-date-epoch/ +const SourceDateEpochEnv = "SOURCE_DATE_EPOCH" + +// SourceDateEpoch returns the SOURCE_DATE_EPOCH env var as *time.Time. +// If the env var is not set, SourceDateEpoch returns nil without an error. +func SourceDateEpoch() (*time.Time, error) { + v, ok := os.LookupEnv(SourceDateEpochEnv) + if !ok || v == "" { + return nil, nil // not an error + } + i64, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return nil, fmt.Errorf("invalid %s value %q: %w", SourceDateEpochEnv, v, err) + } + unix := time.Unix(i64, 0).UTC() + return &unix, nil +} + +// SourceDateEpochOrNow returns the SOURCE_DATE_EPOCH time if available, +// otherwise returns the current time. +func SourceDateEpochOrNow() time.Time { + epoch, err := SourceDateEpoch() + if err != nil { + logrus.WithError(err).Warnf("Invalid %s", SourceDateEpochEnv) + } + if epoch != nil { + return *epoch + } + return time.Now().UTC() +} + +// SetSourceDateEpoch sets the SOURCE_DATE_EPOCH env var. +func SetSourceDateEpoch(tm time.Time) { + os.Setenv(SourceDateEpochEnv, fmt.Sprintf("%d", tm.Unix())) +} + +// UnsetSourceDateEpoch unsets the SOURCE_DATE_EPOCH env var. 
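The new epoch package above centralizes SOURCE_DATE_EPOCH handling for reproducible builds: SourceDateEpoch parses the env var, and SourceDateEpochOrNow falls back to the current UTC time. A short usage sketch, assuming the package is imported from the vendored path added in this diff:

package main

import (
	"fmt"
	"os"

	"github.com/containerd/containerd/pkg/epoch"
)

func main() {
	// With SOURCE_DATE_EPOCH unset, SourceDateEpoch returns (nil, nil) and
	// SourceDateEpochOrNow falls back to time.Now().UTC().
	os.Unsetenv(epoch.SourceDateEpochEnv)
	fmt.Println(epoch.SourceDateEpochOrNow())

	// With it set, timestamps become deterministic.
	os.Setenv(epoch.SourceDateEpochEnv, "1136214245")
	if tm, err := epoch.SourceDateEpoch(); err == nil && tm != nil {
		fmt.Println(*tm) // 2006-01-02 15:04:05 +0000 UTC
	}
}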
+func UnsetSourceDateEpoch() { + os.Unsetenv(SourceDateEpochEnv) +} diff --git a/vendor/github.com/containerd/containerd/pkg/randutil/randutil.go b/vendor/github.com/containerd/containerd/pkg/randutil/randutil.go new file mode 100644 index 0000000000000..f4b657d7dd870 --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/randutil/randutil.go @@ -0,0 +1,48 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package randutil provides utilities for [crypto/rand]. +package randutil + +import ( + "crypto/rand" + "math" + "math/big" +) + +// Int63n is similar to [math/rand.Int63n] but uses [crypto/rand.Reader] under the hood. +func Int63n(n int64) int64 { + b, err := rand.Int(rand.Reader, big.NewInt(n)) + if err != nil { + panic(err) + } + return b.Int64() +} + +// Int63 is similar to [math/rand.Int63] but uses [crypto/rand.Reader] under the hood. +func Int63() int64 { + return Int63n(math.MaxInt64) +} + +// Intn is similar to [math/rand.Intn] but uses [crypto/rand.Reader] under the hood. +func Intn(n int) int { + return int(Int63n(int64(n))) +} + +// Int is similar to [math/rand.Int] but uses [crypto/rand.Reader] under the hood. +func Int() int { + return int(Int63()) +} diff --git a/vendor/github.com/containerd/containerd/pkg/runtimeoptions/v1/api.pb.go b/vendor/github.com/containerd/containerd/pkg/runtimeoptions/v1/api.pb.go index a224dc6f2136f..8a30127be2fc9 100644 --- a/vendor/github.com/containerd/containerd/pkg/runtimeoptions/v1/api.pb.go +++ b/vendor/github.com/containerd/containerd/pkg/runtimeoptions/v1/api.pb.go @@ -1,397 +1,177 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// To regenerate api.pb.go run `make protos` + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/pkg/runtimeoptions/v1/api.proto package runtimeoptions_v1 import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - strings "strings" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date.
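The new randutil package above replaces math/rand helpers with crypto/rand-backed ones, so callers get non-deterministic values without seeding a PRNG. A small usage sketch, importing the package from the vendored path added in this diff; the jitter use case is illustrative:

package main

import (
	"fmt"
	"time"

	"github.com/containerd/containerd/pkg/randutil"
)

func main() {
	// Drop-in replacements for the math/rand equivalents; values come from
	// crypto/rand.Reader, so no Seed call is needed or possible.
	fmt.Println(randutil.Intn(100))         // 0..99
	fmt.Println(randutil.Int63n(1_000_000)) // 0..999999

	// Typical use: jitter for a retry backoff.
	backoff := 100*time.Millisecond + time.Duration(randutil.Int63n(int64(50*time.Millisecond)))
	fmt.Println(backoff)
}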
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type Options struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // TypeUrl specifies the type of the content inside the config file. TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` // ConfigPath specifies the filesystem location of the config file // used by the runtime. - ConfigPath string `protobuf:"bytes,2,opt,name=config_path,json=configPath,proto3" json:"config_path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_sizecache int32 `json:"-"` + ConfigPath string `protobuf:"bytes,2,opt,name=config_path,json=configPath,proto3" json:"config_path,omitempty"` + // Blob specifies an in-memory TOML blob passed from containerd's configuration section + // for this runtime. This will be used if config_path is not specified. + ConfigBody []byte `protobuf:"bytes,3,opt,name=config_body,json=configBody,proto3" json:"config_body,omitempty"` +} + +func (x *Options) Reset() { + *x = Options{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *Options) Reset() { *m = Options{} } -func (*Options) ProtoMessage() {} -func (*Options) Descriptor() ([]byte, []int) { - return fileDescriptor_7700dd27e3487aa6, []int{0} +func (x *Options) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Options) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Options) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Options.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + +func (*Options) ProtoMessage() {} + +func (x *Options) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } -} -func (m *Options) XXX_Merge(src proto.Message) { - xxx_messageInfo_Options.Merge(m, src) -} -func (m *Options) XXX_Size() int { - return m.Size() -} -func (m *Options) XXX_DiscardUnknown() { - xxx_messageInfo_Options.DiscardUnknown(m) + return mi.MessageOf(x) } -var xxx_messageInfo_Options proto.InternalMessageInfo +// Deprecated: Use Options.ProtoReflect.Descriptor instead. 
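The regenerated runtimeoptions.Options above gains a ConfigBody field, so a runtime's TOML configuration can be passed inline when config_path is not set. A sketch of how a caller might populate and marshal it; the TOML content is made up for illustration, and the import path follows the go_package option shown in the diff:

package main

import (
	"fmt"

	runtimeoptions "github.com/containerd/containerd/pkg/runtimeoptions/v1"
	"google.golang.org/protobuf/proto"
)

func main() {
	opts := &runtimeoptions.Options{
		// config_body is used when config_path is not specified.
		ConfigBody: []byte("[plugin_opts]\n  example = true\n"),
	}

	data, err := proto.Marshal(opts)
	if err != nil {
		panic(err)
	}
	fmt.Printf("marshaled %d bytes, inline config: %q\n", len(data), opts.GetConfigBody())
}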
+func (*Options) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_rawDescGZIP(), []int{0} +} -func (m *Options) GetTypeUrl() string { - if m != nil { - return m.TypeUrl +func (x *Options) GetTypeUrl() string { + if x != nil { + return x.TypeUrl } return "" } -func (m *Options) GetConfigPath() string { - if m != nil { - return m.ConfigPath +func (x *Options) GetConfigPath() string { + if x != nil { + return x.ConfigPath } return "" } -func init() { - proto.RegisterType((*Options)(nil), "runtimeoptions.v1.Options") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/pkg/runtimeoptions/v1/api.proto", fileDescriptor_7700dd27e3487aa6) -} - -var fileDescriptor_7700dd27e3487aa6 = []byte{ - // 214 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x48, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d, - 0x4a, 0x41, 0x66, 0x16, 0x64, 0xa7, 0xeb, 0x17, 0x95, 0xe6, 0x95, 0x64, 0xe6, 0xa6, 0xe6, 0x17, - 0x94, 0x64, 0xe6, 0xe7, 0x15, 0xeb, 0x97, 0x19, 0xea, 0x27, 0x16, 0x64, 0xea, 0x15, 0x14, 0xe5, - 0x97, 0xe4, 0x0b, 0x09, 0xa2, 0x4a, 0xea, 0x95, 0x19, 0x4a, 0xe9, 0x22, 0x19, 0x9a, 0x9e, 0x9f, - 0x9e, 0xaf, 0x0f, 0x56, 0x99, 0x54, 0x9a, 0x06, 0xe6, 0x81, 0x39, 0x60, 0x16, 0xc4, 0x04, 0x25, - 0x57, 0x2e, 0x76, 0x7f, 0x88, 0x66, 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, - 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, - 0x9e, 0x8b, 0x3b, 0x39, 0x3f, 0x2f, 0x2d, 0x33, 0x3d, 0xbe, 0x20, 0xb1, 0x24, 0x43, 0x82, 0x09, - 0x2c, 0xcb, 0x05, 0x11, 0x0a, 0x48, 0x2c, 0xc9, 0x70, 0x4a, 0x3b, 0xf1, 0x50, 0x8e, 0xf1, 0xc6, - 0x43, 0x39, 0x86, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, - 0xe0, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x51, 0x1e, 0xe4, 0x79, 0xd4, 0x1a, 0x55, 0x24, - 0xbe, 0xcc, 0x30, 0x89, 0x0d, 0xec, 0x6a, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x91, 0x3c, - 0x3e, 0x79, 0x3b, 0x01, 0x00, 0x00, -} - -func (m *Options) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *Options) GetConfigBody() []byte { + if x != nil { + return x.ConfigBody } - return dAtA[:n], nil + return nil } -func (m *Options) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +var File_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_rawDesc = []byte{ + 0x0a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x31, 0x2f, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x11, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2e, 0x76, 0x31, 0x22, 0x66, 0x0a, 0x07, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x1f, 0x0a, 0x0b, 0x63, + 
0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x6f, 0x64, 0x79, 0x42, 0x4a, 0x5a, + 0x48, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x31, 0x3b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } -func (m *Options) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ConfigPath) > 0 { - i -= len(m.ConfigPath) - copy(dAtA[i:], m.ConfigPath) - i = encodeVarintApi(dAtA, i, uint64(len(m.ConfigPath))) - i-- - dAtA[i] = 0x12 - } - if len(m.TypeUrl) > 0 { - i -= len(m.TypeUrl) - copy(dAtA[i:], m.TypeUrl) - i = encodeVarintApi(dAtA, i, uint64(len(m.TypeUrl))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} +var ( + file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_rawDescData = file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_rawDesc +) -func encodeVarintApi(dAtA []byte, offset int, v uint64) int { - offset -= sovApi(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Options) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.TypeUrl) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - l = len(m.ConfigPath) - if l > 0 { - n += 1 + l + sovApi(uint64(l)) - } - return n +func file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_rawDescData) + }) + return file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_rawDescData } -func sovApi(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozApi(x uint64) (n int) { - return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Options) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Options{`, - `TypeUrl:` + fmt.Sprintf("%v", this.TypeUrl) + `,`, - `ConfigPath:` + fmt.Sprintf("%v", this.ConfigPath) + `,`, - `}`, - }, "") - return s +var file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_goTypes = []interface{}{ + (*Options)(nil), // 0: runtimeoptions.v1.Options } -func valueToStringApi(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) +var 
file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -func (m *Options) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Options: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Options: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TypeUrl = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowApi - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthApi - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthApi - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ConfigPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipApi(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthApi - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func init() { file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_init() } +func file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_init() { + if File_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto != nil { + return } - return nil -} -func skipApi(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - 
iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowApi - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthApi + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Options); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupApi - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthApi - } - if depth == 0 { - return iNdEx, nil } } - return 0, io.ErrUnexpectedEOF + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto = out.File + file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_rawDesc = nil + file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_goTypes = nil + file_github_com_containerd_containerd_pkg_runtimeoptions_v1_api_proto_depIdxs = nil } - -var ( - ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupApi = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/containerd/containerd/pkg/runtimeoptions/v1/api.proto b/vendor/github.com/containerd/containerd/pkg/runtimeoptions/v1/api.proto index c771ea10ee7cd..d0ab0e2f95443 100644 --- a/vendor/github.com/containerd/containerd/pkg/runtimeoptions/v1/api.proto +++ b/vendor/github.com/containerd/containerd/pkg/runtimeoptions/v1/api.proto @@ -3,17 +3,6 @@ syntax = "proto3"; package runtimeoptions.v1; -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; - -option (gogoproto.goproto_stringer_all) = false; -option (gogoproto.stringer_all) = true; -option (gogoproto.goproto_getters_all) = true; -option (gogoproto.marshaler_all) = true; -option (gogoproto.sizer_all) = true; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.goproto_unrecognized_all) = false; - - option go_package = "github.com/containerd/containerd/pkg/runtimeoptions/v1;runtimeoptions_v1"; message Options { @@ -22,4 +11,7 @@ message Options { // ConfigPath specifies the filesystem location of the config file // used by the runtime. string config_path = 2; + // Blob specifies an in-memory TOML blob passed from containerd's configuration section + // for this runtime. This will be used if config_path is not specified. 
+ bytes config_body = 3; } diff --git a/vendor/github.com/containerd/containerd/pkg/runtimeoptions/v1/doc.go b/vendor/github.com/containerd/containerd/pkg/runtimeoptions/v1/doc.go index 62525652f110e..9617e74043dc3 100644 --- a/vendor/github.com/containerd/containerd/pkg/runtimeoptions/v1/doc.go +++ b/vendor/github.com/containerd/containerd/pkg/runtimeoptions/v1/doc.go @@ -14,4 +14,4 @@ limitations under the License. */ -package runtimeoptions_v1 //nolint +package runtimeoptions_v1 //nolint:revive // Ignore var-naming: don't use an underscore in package name (revive) diff --git a/vendor/github.com/containerd/containerd/pkg/seccomp/seccomp_unsupported.go b/vendor/github.com/containerd/containerd/pkg/seccomp/seccomp_unsupported.go index 4458c1c702775..41c2ceacbb6bb 100644 --- a/vendor/github.com/containerd/containerd/pkg/seccomp/seccomp_unsupported.go +++ b/vendor/github.com/containerd/containerd/pkg/seccomp/seccomp_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/pkg/shutdown/shutdown.go b/vendor/github.com/containerd/containerd/pkg/shutdown/shutdown.go index bc1af75abb7da..12ffac14570bf 100644 --- a/vendor/github.com/containerd/containerd/pkg/shutdown/shutdown.go +++ b/vendor/github.com/containerd/containerd/pkg/shutdown/shutdown.go @@ -37,6 +37,11 @@ type Service interface { // the shutdown channel is closed. A callback error will propagate to the // context error RegisterCallback(func(context.Context) error) + // Done returns a channel that's closed when all shutdown callbacks are invoked. + Done() <-chan struct{} + // Err returns nil if Done is not yet closed. + // If Done is closed, Err returns first failed callback error or ErrShutdown. + Err() error } // WithShutdown returns a context which is similar to a cancel context, but @@ -99,6 +104,7 @@ func (s *shutdownService) Err() error { defer s.mu.Unlock() return s.err } + func (s *shutdownService) RegisterCallback(fn func(context.Context) error) { s.mu.Lock() defer s.mu.Unlock() diff --git a/vendor/github.com/containerd/containerd/pkg/streaming/streaming.go b/vendor/github.com/containerd/containerd/pkg/streaming/streaming.go new file mode 100644 index 0000000000000..15fc6c3c1b681 --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/streaming/streaming.go @@ -0,0 +1,47 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package streaming + +import ( + "context" + + "github.com/containerd/typeurl/v2" +) + +type StreamManager interface { + StreamGetter + Register(context.Context, string, Stream) error +} + +type StreamGetter interface { + Get(context.Context, string) (Stream, error) +} + +type StreamCreator interface { + Create(context.Context, string) (Stream, error) +} + +type Stream interface { + // Send sends the object on the stream + Send(typeurl.Any) error + + // Recv receives an object on the stream + Recv() (typeurl.Any, error) + + // Close closes the stream + Close() error +} diff --git a/vendor/github.com/containerd/containerd/pkg/transfer/proxy/transfer.go b/vendor/github.com/containerd/containerd/pkg/transfer/proxy/transfer.go new file mode 100644 index 0000000000000..50dba0b37c788 --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/transfer/proxy/transfer.go @@ -0,0 +1,122 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package proxy + +import ( + "context" + "errors" + "io" + + "google.golang.org/protobuf/types/known/anypb" + + transferapi "github.com/containerd/containerd/api/services/transfer/v1" + transfertypes "github.com/containerd/containerd/api/types/transfer" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/pkg/streaming" + "github.com/containerd/containerd/pkg/transfer" + tstreaming "github.com/containerd/containerd/pkg/transfer/streaming" + "github.com/containerd/typeurl/v2" +) + +type proxyTransferrer struct { + client transferapi.TransferClient + streamCreator streaming.StreamCreator +} + +// NewTransferrer returns a new transferrer which communicates over a GRPC +// connection using the containerd transfer API +func NewTransferrer(client transferapi.TransferClient, sc streaming.StreamCreator) transfer.Transferrer { + return &proxyTransferrer{ + client: client, + streamCreator: sc, + } +} + +func (p *proxyTransferrer) Transfer(ctx context.Context, src interface{}, dst interface{}, opts ...transfer.Opt) error { + o := &transfer.Config{} + for _, opt := range opts { + opt(o) + } + apiOpts := &transferapi.TransferOptions{} + if o.Progress != nil { + sid := tstreaming.GenerateID("progress") + stream, err := p.streamCreator.Create(ctx, sid) + if err != nil { + return err + } + apiOpts.ProgressStream = sid + go func() { + for { + a, err := stream.Recv() + if err != nil { + if !errors.Is(err, io.EOF) { + log.G(ctx).WithError(err).Error("progress stream failed to recv") + } + return + } + i, err := typeurl.UnmarshalAny(a) + if err != nil { + log.G(ctx).WithError(err).Warnf("failed to unmarshal progress object: %v", a.GetTypeUrl()) + } + switch v := i.(type) { + case *transfertypes.Progress: + o.Progress(transfer.Progress{ + Event: v.Event, + Name: v.Name, + Parents: v.Parents, + Progress: v.Progress, + Total: v.Total, + }) + default: + log.G(ctx).Warnf("unhandled progress object %T: %v", i, a.GetTypeUrl()) + } + } + }() + } + asrc, err := p.marshalAny(ctx, src) + if err != nil { + return err + } + adst, err 
:= p.marshalAny(ctx, dst) + if err != nil { + return err + } + req := &transferapi.TransferRequest{ + Source: &anypb.Any{ + TypeUrl: asrc.GetTypeUrl(), + Value: asrc.GetValue(), + }, + Destination: &anypb.Any{ + TypeUrl: adst.GetTypeUrl(), + Value: adst.GetValue(), + }, + Options: apiOpts, + } + _, err = p.client.Transfer(ctx, req) + return err +} +func (p *proxyTransferrer) marshalAny(ctx context.Context, i interface{}) (typeurl.Any, error) { + switch m := i.(type) { + case streamMarshaler: + return m.MarshalAny(ctx, p.streamCreator) + } + return typeurl.MarshalAny(i) +} + +type streamMarshaler interface { + MarshalAny(context.Context, streaming.StreamCreator) (typeurl.Any, error) +} diff --git a/vendor/github.com/containerd/containerd/pkg/transfer/streaming/stream.go b/vendor/github.com/containerd/containerd/pkg/transfer/streaming/stream.go new file mode 100644 index 0000000000000..7352fda869534 --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/transfer/streaming/stream.go @@ -0,0 +1,210 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package streaming + +import ( + "context" + "crypto/rand" + "encoding/base64" + "errors" + "fmt" + "io" + "sync" + "time" + + transferapi "github.com/containerd/containerd/api/types/transfer" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/pkg/streaming" + "github.com/containerd/typeurl/v2" +) + +const maxRead = 32 * 1024 +const windowSize = 2 * maxRead + +var bufPool = &sync.Pool{ + New: func() interface{} { + buffer := make([]byte, maxRead) + return &buffer + }, +} + +func SendStream(ctx context.Context, r io.Reader, stream streaming.Stream) { + window := make(chan int32) + go func() { + defer close(window) + for { + select { + case <-ctx.Done(): + return + default: + } + + any, err := stream.Recv() + if err != nil { + if !errors.Is(err, io.EOF) && !errors.Is(err, context.Canceled) { + log.G(ctx).WithError(err).Error("send stream ended without EOF") + } + return + } + i, err := typeurl.UnmarshalAny(any) + if err != nil { + log.G(ctx).WithError(err).Error("failed to unmarshal stream object") + continue + } + switch v := i.(type) { + case *transferapi.WindowUpdate: + select { + case <-ctx.Done(): + return + case window <- v.Update: + } + default: + log.G(ctx).Errorf("unexpected stream object of type %T", i) + } + } + }() + go func() { + defer stream.Close() + + buf := bufPool.Get().(*[]byte) + defer bufPool.Put(buf) + + var remaining int32 + + for { + if remaining > 0 { + // Don't wait for window update since there are remaining + select { + case <-ctx.Done(): + // TODO: Send error message on stream before close to allow remote side to return error + return + case update := <-window: + remaining += update + default: + } + } else { + // Block until window updated + select { + case <-ctx.Done(): + // TODO: Send error message on stream before close to allow remote side to return error + return + case update := <-window: + remaining = update + } + } + var max int32 = maxRead + if max > 
remaining { + max = remaining + } + b := (*buf)[:max] + n, err := r.Read(b) + if err != nil { + if !errors.Is(err, io.EOF) { + log.G(ctx).WithError(err).Errorf("failed to read stream source") + // TODO: Send error message on stream before close to allow remote side to return error + } + return + } + remaining = remaining - int32(n) + + data := &transferapi.Data{ + Data: b[:n], + } + any, err := typeurl.MarshalAny(data) + if err != nil { + log.G(ctx).WithError(err).Errorf("failed to marshal data for send") + // TODO: Send error message on stream before close to allow remote side to return error + return + } + if err := stream.Send(any); err != nil { + log.G(ctx).WithError(err).Errorf("send failed") + return + } + } + }() +} + +func ReceiveStream(ctx context.Context, stream streaming.Stream) io.Reader { + r, w := io.Pipe() + go func() { + defer stream.Close() + var window int32 + for { + var werr error + if window < windowSize { + update := &transferapi.WindowUpdate{ + Update: windowSize, + } + any, err := typeurl.MarshalAny(update) + if err != nil { + w.CloseWithError(fmt.Errorf("failed to marshal window update: %w", err)) + return + } + // check window update error after recv, stream may be complete + if werr = stream.Send(any); werr == nil { + window += windowSize + } else if errors.Is(werr, io.EOF) { + // TODO: Why does send return EOF here + werr = nil + } + } + any, err := stream.Recv() + if err != nil { + if errors.Is(err, io.EOF) { + err = nil + } else { + err = fmt.Errorf("received failed: %w", err) + } + w.CloseWithError(err) + return + } else if werr != nil { + // Try receive before erroring out + w.CloseWithError(fmt.Errorf("failed to send window update: %w", werr)) + return + } + i, err := typeurl.UnmarshalAny(any) + if err != nil { + w.CloseWithError(fmt.Errorf("failed to unmarshal received object: %w", err)) + return + } + switch v := i.(type) { + case *transferapi.Data: + n, err := w.Write(v.Data) + if err != nil { + w.CloseWithError(fmt.Errorf("failed to unmarshal received object: %w", err)) + // Close will error out sender + return + } + window = window - int32(n) + // TODO: Handle error case + default: + log.G(ctx).Warnf("Ignoring unknown stream object of type %T", i) + continue + } + } + + }() + + return r +} + +func GenerateID(prefix string) string { + t := time.Now() + var b [3]byte + rand.Read(b[:]) + return fmt.Sprintf("%s-%d-%s", prefix, t.Nanosecond(), base64.URLEncoding.EncodeToString(b[:])) +} diff --git a/vendor/github.com/containerd/containerd/pkg/transfer/streaming/writer.go b/vendor/github.com/containerd/containerd/pkg/transfer/streaming/writer.go new file mode 100644 index 0000000000000..94db9d6a81532 --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/transfer/streaming/writer.go @@ -0,0 +1,130 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
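Editor's note: SendStream and ReceiveStream above implement a simple credit-based flow-control protocol: the receiver grants windowSize bytes of credit via WindowUpdate messages, and the sender emits Data messages until the credit is spent. A hedged sketch of how the two halves pair up, using a hypothetical in-memory Stream built on channels (chanStream is not part of containerd):

import (
	"context"
	"fmt"
	"io"
	"strings"

	"github.com/containerd/containerd/pkg/streaming"
	tstreaming "github.com/containerd/containerd/pkg/transfer/streaming"
	"github.com/containerd/typeurl/v2"
)

// chanStream is an illustrative in-memory streaming.Stream: each side sends on
// one channel and receives on the other.
type chanStream struct {
	in  <-chan typeurl.Any
	out chan<- typeurl.Any
}

func (s *chanStream) Send(a typeurl.Any) error { s.out <- a; return nil }
func (s *chanStream) Recv() (typeurl.Any, error) {
	a, ok := <-s.in
	if !ok {
		return nil, io.EOF
	}
	return a, nil
}
func (s *chanStream) Close() error { close(s.out); return nil }

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // releases the sender's window-update goroutine on exit

	aToB := make(chan typeurl.Any)
	bToA := make(chan typeurl.Any)
	var sender, receiver streaming.Stream = &chanStream{in: bToA, out: aToB}, &chanStream{in: aToB, out: bToA}

	r := tstreaming.ReceiveStream(ctx, receiver)                          // grants window credits
	tstreaming.SendStream(ctx, strings.NewReader("hello stream"), sender) // consumes credits, sends Data

	b, err := io.ReadAll(r)
	fmt.Println(string(b), err) // "hello stream" <nil>
}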
+*/ + +package streaming + +import ( + "context" + "errors" + "io" + "sync/atomic" + + transferapi "github.com/containerd/containerd/api/types/transfer" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/pkg/streaming" + "github.com/containerd/typeurl/v2" +) + +func WriteByteStream(ctx context.Context, stream streaming.Stream) io.WriteCloser { + wbs := &writeByteStream{ + ctx: ctx, + stream: stream, + updated: make(chan struct{}, 1), + } + go func() { + for { + select { + case <-ctx.Done(): + return + default: + } + + any, err := stream.Recv() + if err != nil { + if !errors.Is(err, io.EOF) && !errors.Is(err, context.Canceled) { + log.G(ctx).WithError(err).Error("send byte stream ended without EOF") + } + return + } + i, err := typeurl.UnmarshalAny(any) + if err != nil { + log.G(ctx).WithError(err).Error("failed to unmarshal stream object") + continue + } + switch v := i.(type) { + case *transferapi.WindowUpdate: + atomic.AddInt32(&wbs.remaining, v.Update) + select { + case <-ctx.Done(): + return + case wbs.updated <- struct{}{}: + default: + // Don't block if no writes are waiting + } + default: + log.G(ctx).Errorf("unexpected stream object of type %T", i) + } + } + }() + + return wbs +} + +type writeByteStream struct { + ctx context.Context + stream streaming.Stream + remaining int32 + updated chan struct{} +} + +func (wbs *writeByteStream) Write(p []byte) (n int, err error) { + for len(p) > 0 { + remaining := atomic.LoadInt32(&wbs.remaining) + if remaining == 0 { + // Don't wait for window update since there are remaining + select { + case <-wbs.ctx.Done(): + // TODO: Send error message on stream before close to allow remote side to return error + err = io.ErrShortWrite + return + case <-wbs.updated: + continue + } + } + var max int32 = maxRead + if max > int32(len(p)) { + max = int32(len(p)) + } + if max > remaining { + max = remaining + } + // TODO: continue + //remaining = remaining - int32(n) + + data := &transferapi.Data{ + Data: p[:max], + } + var any typeurl.Any + any, err = typeurl.MarshalAny(data) + if err != nil { + log.G(wbs.ctx).WithError(err).Errorf("failed to marshal data for send") + // TODO: Send error message on stream before close to allow remote side to return error + return + } + if err = wbs.stream.Send(any); err != nil { + log.G(wbs.ctx).WithError(err).Errorf("send failed") + return + } + n += int(max) + p = p[max:] + atomic.AddInt32(&wbs.remaining, -1*max) + } + return +} + +func (wbs *writeByteStream) Close() error { + return wbs.stream.Close() +} diff --git a/vendor/github.com/containerd/containerd/pkg/transfer/transfer.go b/vendor/github.com/containerd/containerd/pkg/transfer/transfer.go new file mode 100644 index 0000000000000..01df8c3d39d5b --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/transfer/transfer.go @@ -0,0 +1,136 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
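Editor's note: the proxy transferrer above forwards transfertypes.Progress events over a dedicated stream; callers consume them through the functional options defined in the transfer package whose hunk follows. A hedged, caller-side sketch (progressLogger is illustrative, not part of containerd):

import (
	"context"

	"github.com/containerd/containerd/log"
	"github.com/containerd/containerd/pkg/transfer"
)

// progressLogger runs a transfer and logs each progress event it reports.
func progressLogger(ctx context.Context, t transfer.Transferrer, src, dst interface{}) error {
	return t.Transfer(ctx, src, dst, transfer.WithProgress(func(p transfer.Progress) {
		log.G(ctx).WithField("event", p.Event).Infof("%s: %d/%d", p.Name, p.Progress, p.Total)
	}))
}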
+*/ + +package transfer + +import ( + "context" + "io" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" +) + +type Transferrer interface { + Transfer(ctx context.Context, source interface{}, destination interface{}, opts ...Opt) error +} + +type ImageResolver interface { + Resolve(ctx context.Context) (name string, desc ocispec.Descriptor, err error) +} + +type ImageFetcher interface { + ImageResolver + + Fetcher(ctx context.Context, ref string) (Fetcher, error) +} + +type ImagePusher interface { + Pusher(context.Context, ocispec.Descriptor) (Pusher, error) +} + +type Fetcher interface { + Fetch(context.Context, ocispec.Descriptor) (io.ReadCloser, error) +} + +type Pusher interface { + Push(context.Context, ocispec.Descriptor) (content.Writer, error) +} + +// ImageFilterer is used to filter out child objects of an image +type ImageFilterer interface { + ImageFilter(images.HandlerFunc, content.Store) images.HandlerFunc +} + +// ImageStorer is a type which is capable of storing images for +// the provided descriptor. The descriptor may be any type of manifest +// including an index with multiple image references. +type ImageStorer interface { + Store(context.Context, ocispec.Descriptor, images.Store) ([]images.Image, error) +} + +// ImageGetter is type which returns an image from an image store +type ImageGetter interface { + Get(context.Context, images.Store) (images.Image, error) +} + +// ImageLookup is a type which returns images from an image store +// based on names or prefixes +type ImageLookup interface { + Lookup(context.Context, images.Store) ([]images.Image, error) +} + +// ImageExporter exports images to a writer +type ImageExporter interface { + Export(context.Context, content.Store, []images.Image) error +} + +// ImageImporter imports an image into a content store +type ImageImporter interface { + Import(context.Context, content.Store) (ocispec.Descriptor, error) +} + +// ImageImportStreamer returns an import streamer based on OCI or +// Docker image tar archives. The stream should be a raw tar stream +// and without compression. +type ImageImportStreamer interface { + ImportStream(context.Context) (io.Reader, string, error) +} + +type ImageExportStreamer interface { + ExportStream(context.Context) (io.WriteCloser, string, error) +} + +type ImageUnpacker interface { + UnpackPlatforms() []UnpackConfiguration +} + +// UnpackConfiguration specifies the platform and snapshotter to use for resolving +// the unpack Platform, if snapshotter is not specified the platform default will +// be used. +type UnpackConfiguration struct { + Platform ocispec.Platform + Snapshotter string +} + +type ProgressFunc func(Progress) + +type Config struct { + Progress ProgressFunc +} + +type Opt func(*Config) + +func WithProgress(f ProgressFunc) Opt { + return func(opts *Config) { + opts.Progress = f + } +} + +// Progress is used to represent a particular progress event or incremental +// update for the provided named object. The parents represent the names of +// the objects which initiated the progress for the provided named object. +// The name and what object it represents is determined by the implementation. +type Progress struct { + Event string + Name string + Parents []string + Progress int64 + Total int64 + // Descriptor? 
+} diff --git a/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go b/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go index f05ab7aa9f9c7..e1c1b6cf9c385 100644 --- a/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go +++ b/vendor/github.com/containerd/containerd/pkg/ttrpcutil/client.go @@ -17,6 +17,7 @@ package ttrpcutil import ( + "context" "errors" "fmt" "sync" @@ -42,7 +43,9 @@ type Client struct { // NewClient returns a new containerd TTRPC client that is connected to the containerd instance provided by address func NewClient(address string, opts ...ttrpc.ClientOpts) (*Client, error) { connector := func() (*ttrpc.Client, error) { - conn, err := dialer.Dialer(address, ttrpcDialTimeout) + ctx, cancel := context.WithTimeout(context.Background(), ttrpcDialTimeout) + defer cancel() + conn, err := dialer.ContextDialer(ctx, address) if err != nil { return nil, fmt.Errorf("failed to connect: %w", err) } diff --git a/vendor/github.com/containerd/containerd/pkg/unpack/unpacker.go b/vendor/github.com/containerd/containerd/pkg/unpack/unpacker.go new file mode 100644 index 0000000000000..b160da6521fe0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/pkg/unpack/unpacker.go @@ -0,0 +1,537 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package unpack + +import ( + "context" + "crypto/rand" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/labels" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/pkg/cleanup" + "github.com/containerd/containerd/pkg/kmutex" + "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/snapshots" + "github.com/containerd/containerd/tracing" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/identity" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/sync/errgroup" + "golang.org/x/sync/semaphore" +) + +const ( + labelSnapshotRef = "containerd.io/snapshot.ref" + unpackSpanPrefix = "pkg.unpack.unpacker" +) + +// Result returns information about the unpacks which were completed. +type Result struct { + Unpacks int +} + +type unpackerConfig struct { + platforms []*Platform + + content content.Store + + limiter *semaphore.Weighted + duplicationSuppressor kmutex.KeyedLocker +} + +// Platform represents a platform-specific unpack configuration which includes +// the platform matcher as well as snapshotter and applier. 
+type Platform struct { + Platform platforms.Matcher + + SnapshotterKey string + Snapshotter snapshots.Snapshotter + SnapshotOpts []snapshots.Opt + + Applier diff.Applier + ApplyOpts []diff.ApplyOpt +} + +type UnpackerOpt func(*unpackerConfig) error + +func WithUnpackPlatform(u Platform) UnpackerOpt { + return UnpackerOpt(func(c *unpackerConfig) error { + if u.Platform == nil { + u.Platform = platforms.All + } + if u.Snapshotter == nil { + return fmt.Errorf("snapshotter must be provided to unpack") + } + if u.SnapshotterKey == "" { + if s, ok := u.Snapshotter.(fmt.Stringer); ok { + u.SnapshotterKey = s.String() + } else { + u.SnapshotterKey = "unknown" + } + } + if u.Applier == nil { + return fmt.Errorf("applier must be provided to unpack") + } + + c.platforms = append(c.platforms, &u) + + return nil + }) +} + +func WithLimiter(l *semaphore.Weighted) UnpackerOpt { + return UnpackerOpt(func(c *unpackerConfig) error { + c.limiter = l + return nil + }) +} + +func WithDuplicationSuppressor(d kmutex.KeyedLocker) UnpackerOpt { + return UnpackerOpt(func(c *unpackerConfig) error { + c.duplicationSuppressor = d + return nil + }) +} + +// Unpacker unpacks images by hooking into the image handler process. +// Unpacks happen in the backgrounds and waited on to complete. +type Unpacker struct { + unpackerConfig + + unpacks int32 + ctx context.Context + eg *errgroup.Group +} + +// NewUnpacker creates a new instance of the unpacker which can be used to wrap an +// image handler and unpack in parallel to handling. The unpacker will handle +// calling the block handlers when they are needed by the unpack process. +func NewUnpacker(ctx context.Context, cs content.Store, opts ...UnpackerOpt) (*Unpacker, error) { + eg, ctx := errgroup.WithContext(ctx) + + u := &Unpacker{ + unpackerConfig: unpackerConfig{ + content: cs, + duplicationSuppressor: kmutex.NewNoop(), + }, + ctx: ctx, + eg: eg, + } + for _, opt := range opts { + if err := opt(&u.unpackerConfig); err != nil { + return nil, err + } + } + if len(u.platforms) == 0 { + return nil, fmt.Errorf("no unpack platforms defined: %w", errdefs.ErrInvalidArgument) + } + return u, nil +} + +// Unpack wraps an image handler to filter out blob handling and scheduling them +// during the unpack process. When an image config is encountered, the unpack +// process will be started in a goroutine. 
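// Editor's note, a hedged usage sketch: contentStore, sn, applier and desc
// stand in for an existing content.Store, snapshots.Snapshotter, diff.Applier
// and a manifest descriptor; images.Dispatch is one way, not the only way, to
// drive the wrapped handler.
//
//	u, err := NewUnpacker(ctx, contentStore,
//		WithUnpackPlatform(Platform{
//			Platform:       platforms.Default(),
//			SnapshotterKey: "overlayfs",
//			Snapshotter:    sn,
//			Applier:        applier,
//		}))
//	if err != nil {
//		return err
//	}
//	handler = u.Unpack(handler)                          // wrap the fetch handler
//	if err := images.Dispatch(ctx, handler, nil, desc); err != nil {
//		return err
//	}
//	res, err := u.Wait()                                 // blocks until queued unpacks finish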
+func (u *Unpacker) Unpack(h images.Handler) images.Handler { + var ( + lock sync.Mutex + layers = map[digest.Digest][]ocispec.Descriptor{} + ) + return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + ctx, span := tracing.StartSpan(ctx, tracing.Name(unpackSpanPrefix, "UnpackHandler")) + defer span.End() + span.SetAttributes( + tracing.Attribute("descriptor.media.type", desc.MediaType), + tracing.Attribute("descriptor.digest", desc.Digest.String())) + unlock, err := u.lockBlobDescriptor(ctx, desc) + if err != nil { + return nil, err + } + children, err := h.Handle(ctx, desc) + unlock() + if err != nil { + return children, err + } + + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + var nonLayers []ocispec.Descriptor + var manifestLayers []ocispec.Descriptor + // Split layers from non-layers, layers will be handled after + // the config + for i, child := range children { + span.SetAttributes( + tracing.Attribute("descriptor.child."+strconv.Itoa(i), []string{child.MediaType, child.Digest.String()}), + ) + if images.IsLayerType(child.MediaType) { + manifestLayers = append(manifestLayers, child) + } else { + nonLayers = append(nonLayers, child) + } + } + + lock.Lock() + for _, nl := range nonLayers { + layers[nl.Digest] = manifestLayers + } + lock.Unlock() + + children = nonLayers + case images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: + lock.Lock() + l := layers[desc.Digest] + lock.Unlock() + if len(l) > 0 { + u.eg.Go(func() error { + return u.unpack(h, desc, l) + }) + } + } + return children, nil + }) +} + +// Wait waits for any ongoing unpack processes to complete then will return +// the result. +func (u *Unpacker) Wait() (Result, error) { + if err := u.eg.Wait(); err != nil { + return Result{}, err + } + return Result{ + Unpacks: int(u.unpacks), + }, nil +} + +func (u *Unpacker) unpack( + h images.Handler, + config ocispec.Descriptor, + layers []ocispec.Descriptor, +) error { + ctx := u.ctx + ctx, layerSpan := tracing.StartSpan(ctx, tracing.Name(unpackSpanPrefix, "unpack")) + defer layerSpan.End() + p, err := content.ReadBlob(ctx, u.content, config) + if err != nil { + return err + } + + var i ocispec.Image + if err := json.Unmarshal(p, &i); err != nil { + return fmt.Errorf("unmarshal image config: %w", err) + } + diffIDs := i.RootFS.DiffIDs + if len(layers) != len(diffIDs) { + return fmt.Errorf("number of layers and diffIDs don't match: %d != %d", len(layers), len(diffIDs)) + } + + // TODO: Support multiple unpacks rather than just first match + var unpack *Platform + + imgPlatform := platforms.Normalize(ocispec.Platform{OS: i.OS, Architecture: i.Architecture}) + for _, up := range u.platforms { + if up.Platform.Match(imgPlatform) { + unpack = up + break + } + } + + if unpack == nil { + return fmt.Errorf("unpacker does not support platform %s for image %s", imgPlatform, config.Digest) + } + + atomic.AddInt32(&u.unpacks, 1) + + var ( + sn = unpack.Snapshotter + a = unpack.Applier + cs = u.content + + chain []digest.Digest + + fetchOffset int + fetchC []chan struct{} + fetchErr chan error + ) + + // If there is an early return, ensure any ongoing + // fetches get their context cancelled + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + doUnpackFn := func(i int, desc ocispec.Descriptor) error { + parent := identity.ChainID(chain) + chain = append(chain, diffIDs[i]) + chainID := identity.ChainID(chain).String() + + unlock, err := u.lockSnChainID(ctx, 
chainID, unpack.SnapshotterKey) + if err != nil { + return err + } + defer unlock() + + if _, err := sn.Stat(ctx, chainID); err == nil { + // no need to handle + return nil + } else if !errdefs.IsNotFound(err) { + return fmt.Errorf("failed to stat snapshot %s: %w", chainID, err) + } + + // inherits annotations which are provided as snapshot labels. + snapshotLabels := snapshots.FilterInheritedLabels(desc.Annotations) + if snapshotLabels == nil { + snapshotLabels = make(map[string]string) + } + snapshotLabels[labelSnapshotRef] = chainID + + var ( + key string + mounts []mount.Mount + opts = append(unpack.SnapshotOpts, snapshots.WithLabels(snapshotLabels)) + ) + + for try := 1; try <= 3; try++ { + // Prepare snapshot with from parent, label as root + key = fmt.Sprintf(snapshots.UnpackKeyFormat, uniquePart(), chainID) + mounts, err = sn.Prepare(ctx, key, parent.String(), opts...) + if err != nil { + if errdefs.IsAlreadyExists(err) { + if _, err := sn.Stat(ctx, chainID); err != nil { + if !errdefs.IsNotFound(err) { + return fmt.Errorf("failed to stat snapshot %s: %w", chainID, err) + } + // Try again, this should be rare, log it + log.G(ctx).WithField("key", key).WithField("chainid", chainID).Debug("extraction snapshot already exists, chain id not found") + } else { + // no need to handle, snapshot now found with chain id + return nil + } + } else { + return fmt.Errorf("failed to prepare extraction snapshot %q: %w", key, err) + } + } else { + break + } + } + if err != nil { + return fmt.Errorf("unable to prepare extraction snapshot: %w", err) + } + + // Abort the snapshot if commit does not happen + abort := func(ctx context.Context) { + if err := sn.Remove(ctx, key); err != nil { + log.G(ctx).WithError(err).Errorf("failed to cleanup %q", key) + } + } + + if fetchErr == nil { + fetchErr = make(chan error, 1) + fetchOffset = i + fetchC = make([]chan struct{}, len(layers)-fetchOffset) + for i := range fetchC { + fetchC[i] = make(chan struct{}) + } + + go func(i int) { + err := u.fetch(ctx, h, layers[i:], fetchC) + if err != nil { + fetchErr <- err + } + close(fetchErr) + }(i) + } + + select { + case <-ctx.Done(): + cleanup.Do(ctx, abort) + return ctx.Err() + case err := <-fetchErr: + if err != nil { + cleanup.Do(ctx, abort) + return err + } + case <-fetchC[i-fetchOffset]: + } + + diff, err := a.Apply(ctx, desc, mounts, unpack.ApplyOpts...) + if err != nil { + cleanup.Do(ctx, abort) + return fmt.Errorf("failed to extract layer %s: %w", diffIDs[i], err) + } + if diff.Digest != diffIDs[i] { + cleanup.Do(ctx, abort) + return fmt.Errorf("wrong diff id calculated on extraction %q", diffIDs[i]) + } + + if err = sn.Commit(ctx, chainID, key, opts...); err != nil { + cleanup.Do(ctx, abort) + if errdefs.IsAlreadyExists(err) { + return nil + } + return fmt.Errorf("failed to commit snapshot %s: %w", key, err) + } + + // Set the uncompressed label after the uncompressed + // digest has been verified through apply. 
+ cinfo := content.Info{ + Digest: desc.Digest, + Labels: map[string]string{ + labels.LabelUncompressed: diff.Digest.String(), + }, + } + if _, err := cs.Update(ctx, cinfo, "labels."+labels.LabelUncompressed); err != nil { + return err + } + return nil + } + + for i, desc := range layers { + _, layerSpan := tracing.StartSpan(ctx, tracing.Name(unpackSpanPrefix, "unpackLayer")) + layerSpan.SetAttributes( + tracing.Attribute("layer.media.type", desc.MediaType), + tracing.Attribute("layer.media.size", desc.Size), + tracing.Attribute("layer.media.digest", desc.Digest.String()), + ) + if err := doUnpackFn(i, desc); err != nil { + layerSpan.SetStatus(err) + layerSpan.End() + return err + } + layerSpan.End() + } + + chainID := identity.ChainID(chain).String() + cinfo := content.Info{ + Digest: config.Digest, + Labels: map[string]string{ + fmt.Sprintf("containerd.io/gc.ref.snapshot.%s", unpack.SnapshotterKey): chainID, + }, + } + _, err = cs.Update(ctx, cinfo, fmt.Sprintf("labels.containerd.io/gc.ref.snapshot.%s", unpack.SnapshotterKey)) + if err != nil { + return err + } + log.G(ctx).WithFields(log.Fields{ + "config": config.Digest, + "chainID": chainID, + }).Debug("image unpacked") + + return nil +} + +func (u *Unpacker) fetch(ctx context.Context, h images.Handler, layers []ocispec.Descriptor, done []chan struct{}) error { + eg, ctx2 := errgroup.WithContext(ctx) + for i, desc := range layers { + ctx2, layerSpan := tracing.StartSpan(ctx2, tracing.Name(unpackSpanPrefix, "fetchLayer")) + layerSpan.SetAttributes( + tracing.Attribute("layer.media.type", desc.MediaType), + tracing.Attribute("layer.media.size", desc.Size), + tracing.Attribute("layer.media.digest", desc.Digest.String()), + ) + desc := desc + i := i + if err := u.acquire(ctx); err != nil { + return err + } + + eg.Go(func() error { + unlock, err := u.lockBlobDescriptor(ctx2, desc) + if err != nil { + u.release() + return err + } + + _, err = h.Handle(ctx2, desc) + + unlock() + u.release() + + if err != nil && !errors.Is(err, images.ErrSkipDesc) { + return err + } + close(done[i]) + + return nil + }) + layerSpan.End() + } + + return eg.Wait() +} + +func (u *Unpacker) acquire(ctx context.Context) error { + if u.limiter == nil { + return nil + } + return u.limiter.Acquire(ctx, 1) +} + +func (u *Unpacker) release() { + if u.limiter == nil { + return + } + u.limiter.Release(1) +} + +func (u *Unpacker) lockSnChainID(ctx context.Context, chainID, snapshotter string) (func(), error) { + key := u.makeChainIDKeyWithSnapshotter(chainID, snapshotter) + + if err := u.duplicationSuppressor.Lock(ctx, key); err != nil { + return nil, err + } + return func() { + u.duplicationSuppressor.Unlock(key) + }, nil +} + +func (u *Unpacker) lockBlobDescriptor(ctx context.Context, desc ocispec.Descriptor) (func(), error) { + key := u.makeBlobDescriptorKey(desc) + + if err := u.duplicationSuppressor.Lock(ctx, key); err != nil { + return nil, err + } + return func() { + u.duplicationSuppressor.Unlock(key) + }, nil +} + +func (u *Unpacker) makeChainIDKeyWithSnapshotter(chainID, snapshotter string) string { + return fmt.Sprintf("sn://%s/%v", snapshotter, chainID) +} + +func (u *Unpacker) makeBlobDescriptorKey(desc ocispec.Descriptor) string { + return fmt.Sprintf("blob://%v", desc.Digest) +} + +func uniquePart() string { + t := time.Now() + var b [3]byte + // Ignore read failures, just decreases uniqueness + rand.Read(b[:]) + return fmt.Sprintf("%d-%s", t.Nanosecond(), base64.URLEncoding.EncodeToString(b[:])) +} diff --git 
a/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go b/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go index 4f8d7dd2d597a..c67f773d0a1e9 100644 --- a/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go +++ b/vendor/github.com/containerd/containerd/pkg/userns/userns_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go index 046e0356d19db..8c600fc96b1c7 100644 --- a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go +++ b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go @@ -17,14 +17,9 @@ package platforms import ( - "bufio" - "fmt" - "os" "runtime" - "strings" "sync" - "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" ) @@ -37,95 +32,12 @@ var cpuVariantOnce sync.Once func cpuVariant() string { cpuVariantOnce.Do(func() { if isArmArch(runtime.GOARCH) { - cpuVariantValue = getCPUVariant() + var err error + cpuVariantValue, err = getCPUVariant() + if err != nil { + log.L.Errorf("Error getCPUVariant for OS %s: %v", runtime.GOOS, err) + } } }) return cpuVariantValue } - -// For Linux, the kernel has already detected the ABI, ISA and Features. -// So we don't need to access the ARM registers to detect platform information -// by ourselves. We can just parse these information from /proc/cpuinfo -func getCPUInfo(pattern string) (info string, err error) { - if !isLinuxOS(runtime.GOOS) { - return "", fmt.Errorf("getCPUInfo for OS %s: %w", runtime.GOOS, errdefs.ErrNotImplemented) - } - - cpuinfo, err := os.Open("/proc/cpuinfo") - if err != nil { - return "", err - } - defer cpuinfo.Close() - - // Start to Parse the Cpuinfo line by line. For SMP SoC, we parse - // the first core is enough. 
- scanner := bufio.NewScanner(cpuinfo) - for scanner.Scan() { - newline := scanner.Text() - list := strings.Split(newline, ":") - - if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) { - return strings.TrimSpace(list[1]), nil - } - } - - // Check whether the scanner encountered errors - err = scanner.Err() - if err != nil { - return "", err - } - - return "", fmt.Errorf("getCPUInfo for pattern: %s: %w", pattern, errdefs.ErrNotFound) -} - -func getCPUVariant() string { - if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { - // Windows/Darwin only supports v7 for ARM32 and v8 for ARM64 and so we can use - // runtime.GOARCH to determine the variants - var variant string - switch runtime.GOARCH { - case "arm64": - variant = "v8" - case "arm": - variant = "v7" - default: - variant = "unknown" - } - - return variant - } - - variant, err := getCPUInfo("Cpu architecture") - if err != nil { - log.L.WithError(err).Error("failure getting variant") - return "" - } - - // handle edge case for Raspberry Pi ARMv6 devices (which due to a kernel quirk, report "CPU architecture: 7") - // https://www.raspberrypi.org/forums/viewtopic.php?t=12614 - if runtime.GOARCH == "arm" && variant == "7" { - model, err := getCPUInfo("model name") - if err == nil && strings.HasPrefix(strings.ToLower(model), "armv6-compatible") { - variant = "6" - } - } - - switch strings.ToLower(variant) { - case "8", "aarch64": - variant = "v8" - case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": - variant = "v7" - case "6", "6tej": - variant = "v6" - case "5", "5t", "5te", "5tej": - variant = "v5" - case "4", "4t": - variant = "v4" - case "3": - variant = "v3" - default: - variant = "unknown" - } - - return variant -} diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo_linux.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo_linux.go new file mode 100644 index 0000000000000..722d86c3578cc --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/cpuinfo_linux.go @@ -0,0 +1,161 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "bufio" + "bytes" + "fmt" + "os" + "runtime" + "strings" + + "github.com/containerd/containerd/errdefs" + "golang.org/x/sys/unix" +) + +// getMachineArch retrieves the machine architecture through system call +func getMachineArch() (string, error) { + var uname unix.Utsname + err := unix.Uname(&uname) + if err != nil { + return "", err + } + + arch := string(uname.Machine[:bytes.IndexByte(uname.Machine[:], 0)]) + + return arch, nil +} + +// For Linux, the kernel has already detected the ABI, ISA and Features. +// So we don't need to access the ARM registers to detect platform information +// by ourselves. 
We can just parse these information from /proc/cpuinfo +func getCPUInfo(pattern string) (info string, err error) { + + cpuinfo, err := os.Open("/proc/cpuinfo") + if err != nil { + return "", err + } + defer cpuinfo.Close() + + // Start to Parse the Cpuinfo line by line. For SMP SoC, we parse + // the first core is enough. + scanner := bufio.NewScanner(cpuinfo) + for scanner.Scan() { + newline := scanner.Text() + list := strings.Split(newline, ":") + + if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) { + return strings.TrimSpace(list[1]), nil + } + } + + // Check whether the scanner encountered errors + err = scanner.Err() + if err != nil { + return "", err + } + + return "", fmt.Errorf("getCPUInfo for pattern %s: %w", pattern, errdefs.ErrNotFound) +} + +// getCPUVariantFromArch get CPU variant from arch through a system call +func getCPUVariantFromArch(arch string) (string, error) { + + var variant string + + arch = strings.ToLower(arch) + + if arch == "aarch64" { + variant = "8" + } else if arch[0:4] == "armv" && len(arch) >= 5 { + //Valid arch format is in form of armvXx + switch arch[3:5] { + case "v8": + variant = "8" + case "v7": + variant = "7" + case "v6": + variant = "6" + case "v5": + variant = "5" + case "v4": + variant = "4" + case "v3": + variant = "3" + default: + variant = "unknown" + } + } else { + return "", fmt.Errorf("getCPUVariantFromArch invalid arch: %s, %w", arch, errdefs.ErrInvalidArgument) + } + return variant, nil +} + +// getCPUVariant returns cpu variant for ARM +// We first try reading "Cpu architecture" field from /proc/cpuinfo +// If we can't find it, then fall back using a system call +// This is to cover running ARM in emulated environment on x86 host as this field in /proc/cpuinfo +// was not present. +func getCPUVariant() (string, error) { + + variant, err := getCPUInfo("Cpu architecture") + if err != nil { + if errdefs.IsNotFound(err) { + //Let's try getting CPU variant from machine architecture + arch, err := getMachineArch() + if err != nil { + return "", fmt.Errorf("failure getting machine architecture: %v", err) + } + + variant, err = getCPUVariantFromArch(arch) + if err != nil { + return "", fmt.Errorf("failure getting CPU variant from machine architecture: %v", err) + } + } else { + return "", fmt.Errorf("failure getting CPU variant: %v", err) + } + } + + // handle edge case for Raspberry Pi ARMv6 devices (which due to a kernel quirk, report "CPU architecture: 7") + // https://www.raspberrypi.org/forums/viewtopic.php?t=12614 + if runtime.GOARCH == "arm" && variant == "7" { + model, err := getCPUInfo("model name") + if err == nil && strings.HasPrefix(strings.ToLower(model), "armv6-compatible") { + variant = "6" + } + } + + switch strings.ToLower(variant) { + case "8", "aarch64": + variant = "v8" + case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": + variant = "v7" + case "6", "6tej": + variant = "v6" + case "5", "5t", "5te", "5tej": + variant = "v5" + case "4", "4t": + variant = "v4" + case "3": + variant = "v3" + default: + variant = "unknown" + } + + return variant, nil +} diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo_other.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo_other.go new file mode 100644 index 0000000000000..fa5f19c427aed --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/cpuinfo_other.go @@ -0,0 +1,59 @@ +//go:build !linux + +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "fmt" + "runtime" + + "github.com/containerd/containerd/errdefs" +) + +func getCPUVariant() (string, error) { + + var variant string + + if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { + // Windows/Darwin only supports v7 for ARM32 and v8 for ARM64 and so we can use + // runtime.GOARCH to determine the variants + switch runtime.GOARCH { + case "arm64": + variant = "v8" + case "arm": + variant = "v7" + default: + variant = "unknown" + } + } else if runtime.GOOS == "freebsd" { + // FreeBSD supports ARMv6 and ARMv7 as well as ARMv4 and ARMv5 (though deprecated) + // detecting those variants is currently unimplemented + switch runtime.GOARCH { + case "arm64": + variant = "v8" + default: + variant = "unknown" + } + + } else { + return "", fmt.Errorf("getCPUVariant for OS %s: %v", runtime.GOOS, errdefs.ErrNotImplemented) + + } + + return variant, nil +} diff --git a/vendor/github.com/containerd/containerd/platforms/database.go b/vendor/github.com/containerd/containerd/platforms/database.go index dbe9957ca9dbd..2e26fd3b4faed 100644 --- a/vendor/github.com/containerd/containerd/platforms/database.go +++ b/vendor/github.com/containerd/containerd/platforms/database.go @@ -21,13 +21,6 @@ import ( "strings" ) -// isLinuxOS returns true if the operating system is Linux. -// -// The OS value should be normalized before calling this function. -func isLinuxOS(os string) bool { - return os == "linux" -} - // These function are generated from https://golang.org/src/go/build/syslist.go. // // We use switch statements because they are slightly faster than map lookups diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go b/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go index e249fe48d3835..72355ca85fd91 100644 --- a/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go +++ b/vendor/github.com/containerd/containerd/platforms/defaults_darwin.go @@ -1,5 +1,4 @@ //go:build darwin -// +build darwin /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_freebsd.go b/vendor/github.com/containerd/containerd/platforms/defaults_freebsd.go new file mode 100644 index 0000000000000..d3fe89e076685 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/defaults_freebsd.go @@ -0,0 +1,43 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package platforms + +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + } +} + +// Default returns the default matcher for the platform. +func Default() MatchComparer { + return Ordered(DefaultSpec(), specs.Platform{ + OS: "linux", + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + }) +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go index 49690f1b3e772..44acc47eb3243 100644 --- a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go +++ b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go @@ -1,5 +1,4 @@ -//go:build !windows && !darwin -// +build !windows,!darwin +//go:build !windows && !darwin && !freebsd /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go index ff9771a600323..fd5756516cfc9 100644 --- a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go +++ b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go @@ -22,7 +22,6 @@ import ( "strconv" "strings" - imagespec "github.com/opencontainers/image-spec/specs-go/v1" specs "github.com/opencontainers/image-spec/specs-go/v1" "golang.org/x/sys/windows" ) @@ -39,29 +38,31 @@ func DefaultSpec() specs.Platform { } } -type matchComparer struct { - defaults Matcher +type windowsmatcher struct { + specs.Platform osVersionPrefix string + defaultMatcher Matcher } // Match matches platform with the same windows major, minor // and build version. -func (m matchComparer) Match(p specs.Platform) bool { - match := m.defaults.Match(p) +func (m windowsmatcher) Match(p specs.Platform) bool { + match := m.defaultMatcher.Match(p) - if match && p.OS == "windows" { + if match && m.OS == "windows" { if strings.HasPrefix(p.OSVersion, m.osVersionPrefix) { return true } return p.OSVersion == "" } - return false + + return match } // Less sorts matched platforms in front of other platforms. // For matched platforms, it puts platforms with larger revision // number in front. -func (m matchComparer) Less(p1, p2 imagespec.Platform) bool { +func (m windowsmatcher) Less(p1, p2 specs.Platform) bool { m1, m2 := m.Match(p1), m.Match(p2) if m1 && m2 { r1, r2 := revision(p1.OSVersion), revision(p2.OSVersion) @@ -82,14 +83,15 @@ func revision(v string) int { return r } +func prefix(v string) string { + parts := strings.Split(v, ".") + if len(parts) < 4 { + return v + } + return strings.Join(parts[0:3], ".") +} + // Default returns the current platform's default platform specification. 
func Default() MatchComparer { - major, minor, build := windows.RtlGetNtVersionNumbers() - return matchComparer{ - defaults: Ordered(DefaultSpec(), specs.Platform{ - OS: "linux", - Architecture: runtime.GOARCH, - }), - osVersionPrefix: fmt.Sprintf("%d.%d.%d", major, minor, build), - } + return Only(DefaultSpec()) } diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/containerd/platforms/platforms.go index 234309941803b..8dcde7db7c8f1 100644 --- a/vendor/github.com/containerd/containerd/platforms/platforms.go +++ b/vendor/github.com/containerd/containerd/platforms/platforms.go @@ -114,14 +114,18 @@ import ( "strconv" "strings" - "github.com/containerd/containerd/errdefs" specs "github.com/opencontainers/image-spec/specs-go/v1" + + "github.com/containerd/containerd/errdefs" ) var ( specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`) ) +// Platform is a type alias for convenience, so there is no need to import image-spec package everywhere. +type Platform = specs.Platform + // Matcher matches platforms specifications, provided by an image or runtime. type Matcher interface { Match(platform specs.Platform) bool @@ -136,9 +140,7 @@ type Matcher interface { // // Applications should opt to use `Match` over directly parsing specifiers. func NewMatcher(platform specs.Platform) Matcher { - return &matcher{ - Platform: Normalize(platform), - } + return newDefaultMatcher(platform) } type matcher struct { @@ -257,5 +259,6 @@ func Format(platform specs.Platform) string { func Normalize(platform specs.Platform) specs.Platform { platform.OS = normalizeOS(platform.OS) platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant) + return platform } diff --git a/vendor/github.com/containerd/containerd/platforms/platforms_other.go b/vendor/github.com/containerd/containerd/platforms/platforms_other.go new file mode 100644 index 0000000000000..03f4dcd998148 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/platforms_other.go @@ -0,0 +1,30 @@ +//go:build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// NewMatcher returns the default Matcher for containerd +func newDefaultMatcher(platform specs.Platform) Matcher { + return &matcher{ + Platform: Normalize(platform), + } +} diff --git a/vendor/github.com/containerd/containerd/sys/epoll.go b/vendor/github.com/containerd/containerd/platforms/platforms_windows.go similarity index 55% rename from vendor/github.com/containerd/containerd/sys/epoll.go rename to vendor/github.com/containerd/containerd/platforms/platforms_windows.go index 73a57013ff13a..950e2a2ddbb57 100644 --- a/vendor/github.com/containerd/containerd/sys/epoll.go +++ b/vendor/github.com/containerd/containerd/platforms/platforms_windows.go @@ -1,6 +1,3 @@ -//go:build linux -// +build linux - /* Copyright The containerd Authors. 
@@ -17,18 +14,21 @@ limitations under the License. */ -package sys - -import "golang.org/x/sys/unix" - -// EpollCreate1 is an alias for unix.EpollCreate1 -// Deprecated: use golang.org/x/sys/unix.EpollCreate1 -var EpollCreate1 = unix.EpollCreate1 - -// EpollCtl is an alias for unix.EpollCtl -// Deprecated: use golang.org/x/sys/unix.EpollCtl -var EpollCtl = unix.EpollCtl - -// EpollWait is an alias for unix.EpollWait -// Deprecated: use golang.org/x/sys/unix.EpollWait -var EpollWait = unix.EpollWait +package platforms + +import ( + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// NewMatcher returns a Windows matcher that will match on osVersionPrefix if +// the platform is Windows otherwise use the default matcher +func newDefaultMatcher(platform specs.Platform) Matcher { + prefix := prefix(platform.OSVersion) + return windowsmatcher{ + Platform: platform, + osVersionPrefix: prefix, + defaultMatcher: &matcher{ + Platform: Normalize(platform), + }, + } +} diff --git a/vendor/github.com/containerd/containerd/plugin/plugin.go b/vendor/github.com/containerd/containerd/plugin/plugin.go index eb38c127157fd..082f3178b0d9a 100644 --- a/vendor/github.com/containerd/containerd/plugin/plugin.go +++ b/vendor/github.com/containerd/containerd/plugin/plugin.go @@ -76,8 +76,20 @@ const ( GCPlugin Type = "io.containerd.gc.v1" // EventPlugin implements event handling EventPlugin Type = "io.containerd.event.v1" + // LeasePlugin implements lease manager + LeasePlugin Type = "io.containerd.lease.v1" + // Streaming implements a stream manager + StreamingPlugin Type = "io.containerd.streaming.v1" // TracingProcessorPlugin implements a open telemetry span processor TracingProcessorPlugin Type = "io.containerd.tracing.processor.v1" + // NRIApiPlugin implements the NRI adaptation interface for containerd. + NRIApiPlugin Type = "io.containerd.nri.v1" + // TransferPlugin implements a transfer service + TransferPlugin Type = "io.containerd.transfer.v1" + // SandboxStorePlugin implements a sandbox store + SandboxStorePlugin Type = "io.containerd.sandbox.store.v1" + // SandboxControllerPlugin implements a sandbox controller + SandboxControllerPlugin Type = "io.containerd.sandbox.controller.v1" ) const ( diff --git a/vendor/github.com/containerd/containerd/plugin/plugin_go18.go b/vendor/github.com/containerd/containerd/plugin/plugin_go18.go index 0df0669d29a05..0ee442d9553de 100644 --- a/vendor/github.com/containerd/containerd/plugin/plugin_go18.go +++ b/vendor/github.com/containerd/containerd/plugin/plugin_go18.go @@ -1,5 +1,4 @@ //go:build go1.8 && !windows && amd64 && !static_build && !gccgo -// +build go1.8,!windows,amd64,!static_build,!gccgo /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/plugin/plugin_other.go b/vendor/github.com/containerd/containerd/plugin/plugin_other.go index a2883bbbadf46..c0b53a6b4d137 100644 --- a/vendor/github.com/containerd/containerd/plugin/plugin_other.go +++ b/vendor/github.com/containerd/containerd/plugin/plugin_other.go @@ -1,5 +1,4 @@ //go:build !go1.8 || windows || !amd64 || static_build || gccgo -// +build !go1.8 windows !amd64 static_build gccgo /* Copyright The containerd Authors. 
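Editor's note: for orientation, a hedged sketch of how the exported platforms entry points touched above fit together (describe is illustrative; the specifier string, e.g. "linux/arm/v7", is arbitrary):

import (
	"github.com/containerd/containerd/platforms"
)

// describe parses a platform specifier and reports whether it matches the host.
func describe(spec string) (string, bool, error) {
	p, err := platforms.Parse(spec) // p has type platforms.Platform, the new ocispec alias
	if err != nil {
		return "", false, err
	}
	m := platforms.NewMatcher(p) // plain matcher on unix, the windowsmatcher above on Windows
	return platforms.Format(p), m.Match(platforms.DefaultSpec()), nil
}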
diff --git a/vendor/github.com/containerd/containerd/process.go b/vendor/github.com/containerd/containerd/process.go index 297c77ab6e03d..73d8f86625817 100644 --- a/vendor/github.com/containerd/containerd/process.go +++ b/vendor/github.com/containerd/containerd/process.go @@ -26,6 +26,7 @@ import ( "github.com/containerd/containerd/api/services/tasks/v1" "github.com/containerd/containerd/cio" "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/protobuf" ) // Process represents a system process @@ -166,7 +167,7 @@ func (p *process) Wait(ctx context.Context) (<-chan ExitStatus, error) { } c <- ExitStatus{ code: r.ExitStatus, - exitedAt: r.ExitedAt, + exitedAt: protobuf.FromTimestamp(r.ExitedAt), } }() return c, nil @@ -226,7 +227,7 @@ func (p *process) Delete(ctx context.Context, opts ...ProcessDeleteOpts) (*ExitS p.io.Wait() p.io.Close() } - return &ExitStatus{code: r.ExitStatus, exitedAt: r.ExitedAt}, nil + return &ExitStatus{code: r.ExitStatus, exitedAt: protobuf.FromTimestamp(r.ExitedAt)}, nil } func (p *process) Status(ctx context.Context) (Status, error) { diff --git a/vendor/github.com/containerd/containerd/protobuf/any.go b/vendor/github.com/containerd/containerd/protobuf/any.go new file mode 100644 index 0000000000000..84da2a33daf08 --- /dev/null +++ b/vendor/github.com/containerd/containerd/protobuf/any.go @@ -0,0 +1,47 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package protobuf + +import ( + "github.com/containerd/typeurl/v2" + "google.golang.org/protobuf/types/known/anypb" +) + +// FromAny converts typeurl.Any to github.com/containerd/containerd/protobuf/types.Any. +func FromAny(from typeurl.Any) *anypb.Any { + if from == nil { + return nil + } + + if pbany, ok := from.(*anypb.Any); ok { + return pbany + } + + return &anypb.Any{ + TypeUrl: from.GetTypeUrl(), + Value: from.GetValue(), + } +} + +// FromAny converts an arbitrary interface to github.com/containerd/containerd/protobuf/types.Any. +func MarshalAnyToProto(from interface{}) (*anypb.Any, error) { + any, err := typeurl.MarshalAny(from) + if err != nil { + return nil, err + } + return FromAny(any), nil +} diff --git a/vendor/github.com/containerd/containerd/protobuf/compare.go b/vendor/github.com/containerd/containerd/protobuf/compare.go new file mode 100644 index 0000000000000..602a4bcac5786 --- /dev/null +++ b/vendor/github.com/containerd/containerd/protobuf/compare.go @@ -0,0 +1,41 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
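Editor's note: a hedged sketch of the new protobuf helpers, FromAny above and the go-cmp Compare option added in the compare.go hunk that follows; transfertypes.Progress is used only as a convenient proto message:

import (
	"fmt"

	transfertypes "github.com/containerd/containerd/api/types/transfer"
	"github.com/containerd/containerd/protobuf"
	"github.com/containerd/typeurl/v2"
	"github.com/google/go-cmp/cmp"
)

func protoHelpers() {
	a := &transfertypes.Progress{Name: "layer-0", Progress: 10, Total: 100}
	b := &transfertypes.Progress{Name: "layer-0", Progress: 10, Total: 100}

	// Compare makes go-cmp fall back to proto.Equal for proto messages instead
	// of reflecting over their unexported fields.
	fmt.Println(cmp.Equal(a, b, protobuf.Compare)) // true

	// FromAny converts a typeurl.Any into the concrete *anypb.Any wire type.
	any, _ := typeurl.MarshalAny(a)
	fmt.Println(protobuf.FromAny(any).GetTypeUrl())
}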
+*/ + +package protobuf + +import ( + "github.com/google/go-cmp/cmp" + "google.golang.org/protobuf/proto" +) + +var Compare = cmp.FilterValues( + func(x, y interface{}) bool { + _, xok := x.(proto.Message) + _, yok := y.(proto.Message) + return xok && yok + }, + cmp.Comparer(func(x, y interface{}) bool { + vx, ok := x.(proto.Message) + if !ok { + return false + } + vy, ok := y.(proto.Message) + if !ok { + return false + } + return proto.Equal(vx, vy) + }), +) diff --git a/vendor/github.com/containerd/containerd/mount/subprocess_unsafe_linux.s b/vendor/github.com/containerd/containerd/protobuf/plugin/doc.go similarity index 97% rename from vendor/github.com/containerd/containerd/mount/subprocess_unsafe_linux.s rename to vendor/github.com/containerd/containerd/protobuf/plugin/doc.go index c073fa4ad7cc8..401a6d5ccb2ab 100644 --- a/vendor/github.com/containerd/containerd/mount/subprocess_unsafe_linux.s +++ b/vendor/github.com/containerd/containerd/protobuf/plugin/doc.go @@ -13,3 +13,5 @@ See the License for the specific language governing permissions and limitations under the License. */ + +package plugin diff --git a/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.pb.go b/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.pb.go new file mode 100644 index 0000000000000..1bab0c77666ec --- /dev/null +++ b/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.pb.go @@ -0,0 +1,144 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 +// source: github.com/containerd/containerd/protobuf/plugin/fieldpath.proto + +package plugin + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63300, + Name: "containerd.plugin.fieldpath_all", + Tag: "varint,63300,opt,name=fieldpath_all", + Filename: "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto", + }, + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64400, + Name: "containerd.plugin.fieldpath", + Tag: "varint,64400,opt,name=fieldpath", + Filename: "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto", + }, +} + +// Extension fields to descriptorpb.FileOptions. +var ( + // optional bool fieldpath_all = 63300; + E_FieldpathAll = &file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_extTypes[0] +) + +// Extension fields to descriptorpb.MessageOptions. +var ( + // optional bool fieldpath = 64400; + E_Fieldpath = &file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_extTypes[1] +) + +var File_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_rawDesc = []byte{ + 0x0a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x11, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, + 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x43, 0x0a, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x70, 0x61, 0x74, 0x68, 0x5f, 0x61, 0x6c, 0x6c, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc4, 0xee, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, 0x41, 0x6c, 0x6c, 0x3a, 0x3f, 0x0a, 0x09, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x90, 0xf7, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, 0x42, 0x32, 0x5a, + 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, +} + +var file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_goTypes = []interface{}{ + (*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions + (*descriptorpb.MessageOptions)(nil), // 1: google.protobuf.MessageOptions +} +var file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_depIdxs = []int32{ + 0, 
// 0: containerd.plugin.fieldpath_all:extendee -> google.protobuf.FileOptions + 1, // 1: containerd.plugin.fieldpath:extendee -> google.protobuf.MessageOptions + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 0, // [0:2] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_init() } +func file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_init() { + if File_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 2, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_depIdxs, + ExtensionInfos: file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_extTypes, + }.Build() + File_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto = out.File + file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_rawDesc = nil + file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_goTypes = nil + file_github_com_containerd_containerd_protobuf_plugin_fieldpath_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.proto b/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.proto new file mode 100644 index 0000000000000..de98dd899f4c0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.proto @@ -0,0 +1,42 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
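// Illustrative sketch, not part of the vendored files: the Compare option from
// protobuf/compare.go above is a go-cmp filter, so callers can diff generated
// messages without tripping over their unexported internal fields. A minimal
// usage, assuming only the packages imported below:
package main

import (
	"fmt"
	"time"

	"github.com/containerd/containerd/protobuf"
	"github.com/google/go-cmp/cmp"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	a := timestamppb.New(time.Unix(1, 0))
	b := timestamppb.New(time.Unix(1, 0))
	// With protobuf.Compare, cmp delegates to proto.Equal for proto messages
	// instead of reflecting over their unexported state.
	fmt.Println(cmp.Equal(a, b, protobuf.Compare)) // true
}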
+ +syntax = "proto2"; +package containerd.plugin; + +import "google/protobuf/descriptor.proto"; + +option go_package = "github.com/containerd/containerd/protobuf/plugin"; + +extend google.protobuf.FileOptions { + optional bool fieldpath_all = 63300; +} + +extend google.protobuf.MessageOptions { + optional bool fieldpath = 64400; +} diff --git a/vendor/github.com/containerd/containerd/protobuf/proto/proto.go b/vendor/github.com/containerd/containerd/protobuf/proto/proto.go new file mode 100644 index 0000000000000..6c5e7b75e3add --- /dev/null +++ b/vendor/github.com/containerd/containerd/protobuf/proto/proto.go @@ -0,0 +1,30 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package proto provides convinient aliases that make google.golang.org/protobuf migration easier. +package proto + +import ( + google "google.golang.org/protobuf/proto" +) + +func Marshal(input google.Message) ([]byte, error) { + return google.Marshal(input) +} + +func Unmarshal(input []byte, output google.Message) error { + return google.Unmarshal(input, output) +} diff --git a/vendor/github.com/containerd/containerd/protobuf/timestamp.go b/vendor/github.com/containerd/containerd/protobuf/timestamp.go new file mode 100644 index 0000000000000..0615f823b04a5 --- /dev/null +++ b/vendor/github.com/containerd/containerd/protobuf/timestamp.go @@ -0,0 +1,36 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package protobuf + +import ( + "time" + + "google.golang.org/protobuf/types/known/timestamppb" +) + +// Once we migrate off from gogo/protobuf, we can use the function below, which don't return any errors. +// https://github.com/protocolbuffers/protobuf-go/blob/v1.28.0/types/known/timestamppb/timestamp.pb.go#L200-L208 + +// ToTimestamp creates protobuf's Timestamp from time.Time. +func ToTimestamp(from time.Time) *timestamppb.Timestamp { + return timestamppb.New(from) +} + +// FromTimestamp creates time.Time from protobuf's Timestamp. +func FromTimestamp(from *timestamppb.Timestamp) time.Time { + return from.AsTime() +} diff --git a/vendor/github.com/containerd/containerd/protobuf/types/types.go b/vendor/github.com/containerd/containerd/protobuf/types/types.go new file mode 100644 index 0000000000000..1a920a2f98e71 --- /dev/null +++ b/vendor/github.com/containerd/containerd/protobuf/types/types.go @@ -0,0 +1,28 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package types provides convinient aliases that make google.golang.org/protobuf migration easier. +package types + +import ( + "google.golang.org/genproto/protobuf/field_mask" + "google.golang.org/protobuf/types/known/anypb" + "google.golang.org/protobuf/types/known/emptypb" +) + +type Empty = emptypb.Empty +type Any = anypb.Any +type FieldMask = field_mask.FieldMask diff --git a/vendor/github.com/containerd/containerd/pull.go b/vendor/github.com/containerd/containerd/pull.go index 92f7719b1f7f5..5d96c8cc72d15 100644 --- a/vendor/github.com/containerd/containerd/pull.go +++ b/vendor/github.com/containerd/containerd/pull.go @@ -23,19 +23,28 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" + "github.com/containerd/containerd/pkg/unpack" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" - "github.com/containerd/containerd/remotes/docker/schema1" + "github.com/containerd/containerd/remotes/docker/schema1" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. + "github.com/containerd/containerd/tracing" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/sync/errgroup" "golang.org/x/sync/semaphore" ) +const ( + pullSpanPrefix = "pull" +) + // Pull downloads the provided content into containerd's content store // and returns a platform specific image object func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (_ Image, retErr error) { + ctx, span := tracing.StartSpan(ctx, tracing.Name(pullSpanPrefix, "Pull")) + defer span.End() + pullCtx := defaultRemoteContext() + for _, o := range opts { if err := o(c, pullCtx); err != nil { return nil, err @@ -57,25 +66,60 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (_ Ima } } + span.SetAttributes( + tracing.Attribute("image.ref", ref), + tracing.Attribute("unpack", pullCtx.Unpack), + tracing.Attribute("max.concurrent.downloads", pullCtx.MaxConcurrentDownloads), + tracing.Attribute("platforms.count", len(pullCtx.Platforms)), + ) + ctx, done, err := c.WithLease(ctx) if err != nil { return nil, err } defer done(ctx) - var unpacks int32 - var unpackEg *errgroup.Group - var unpackWrapper func(f images.Handler) images.Handler + var unpacker *unpack.Unpacker if pullCtx.Unpack { - // unpacker only supports schema 2 image, for schema 1 this is noop. 
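// Illustrative sketch of the new helper package in use (nothing below is part
// of the vendored change): ToTimestamp/FromTimestamp are what back the ExitedAt
// conversions in process.go, and MarshalAnyToProto wraps a value via typeurl
// into an *anypb.Any.
package main

import (
	"fmt"
	"time"

	"github.com/containerd/containerd/protobuf"
)

func main() {
	ts := protobuf.ToTimestamp(time.Now())         // time.Time -> *timestamppb.Timestamp
	fmt.Println(protobuf.FromTimestamp(ts).Unix()) // and back to time.Time

	anyMsg, err := protobuf.MarshalAnyToProto(ts) // proto message -> *anypb.Any
	if err != nil {
		panic(err)
	}
	fmt.Println(anyMsg.GetTypeUrl())
}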
- u, err := c.newUnpacker(ctx, pullCtx) + snapshotterName, err := c.resolveSnapshotterName(ctx, pullCtx.Snapshotter) + if err != nil { + return nil, fmt.Errorf("unable to resolve snapshotter: %w", err) + } + span.SetAttributes(tracing.Attribute("snapshotter.name", snapshotterName)) + var uconfig UnpackConfig + for _, opt := range pullCtx.UnpackOpts { + if err := opt(ctx, &uconfig); err != nil { + return nil, err + } + } + var platformMatcher platforms.Matcher + if !uconfig.CheckPlatformSupported { + platformMatcher = platforms.All + } + + // Check client Unpack config + platform := unpack.Platform{ + Platform: platformMatcher, + SnapshotterKey: snapshotterName, + Snapshotter: c.SnapshotService(snapshotterName), + SnapshotOpts: append(pullCtx.SnapshotterOpts, uconfig.SnapshotOpts...), + Applier: c.DiffService(), + ApplyOpts: uconfig.ApplyOpts, + } + uopts := []unpack.UnpackerOpt{unpack.WithUnpackPlatform(platform)} + if pullCtx.MaxConcurrentDownloads > 0 { + uopts = append(uopts, unpack.WithLimiter(semaphore.NewWeighted(int64(pullCtx.MaxConcurrentDownloads)))) + } + if uconfig.DuplicationSuppressor != nil { + uopts = append(uopts, unpack.WithDuplicationSuppressor(uconfig.DuplicationSuppressor)) + } + unpacker, err = unpack.NewUnpacker(ctx, c.ContentStore(), uopts...) if err != nil { - return nil, fmt.Errorf("create unpacker: %w", err) + return nil, fmt.Errorf("unable to initialize unpacker: %w", err) } - unpackWrapper, unpackEg = u.handlerWrapper(ctx, pullCtx, &unpacks) defer func() { - if err := unpackEg.Wait(); err != nil { + if _, err := unpacker.Wait(); err != nil { if retErr == nil { retErr = fmt.Errorf("unpack: %w", err) } @@ -84,9 +128,9 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (_ Ima wrapper := pullCtx.HandlerWrapper pullCtx.HandlerWrapper = func(h images.Handler) images.Handler { if wrapper == nil { - return unpackWrapper(h) + return unpacker.Unpack(h) } - return unpackWrapper(wrapper(h)) + return unpacker.Unpack(wrapper(h)) } } @@ -98,12 +142,15 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (_ Ima // NOTE(fuweid): unpacker defers blobs download. before create image // record in ImageService, should wait for unpacking(including blobs // download). - if pullCtx.Unpack { - if unpackEg != nil { - if err := unpackEg.Wait(); err != nil { - return nil, err - } + var ur unpack.Result + if unpacker != nil { + _, unpackSpan := tracing.StartSpan(ctx, tracing.Name(pullSpanPrefix, "UnpackWait")) + if ur, err = unpacker.Wait(); err != nil { + unpackSpan.SetStatus(err) + unpackSpan.End() + return nil, err } + unpackSpan.End() } img, err = c.createNewImage(ctx, img) @@ -112,14 +159,13 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (_ Ima } i := NewImageWithPlatform(c, img, pullCtx.PlatformMatcher) + span.SetAttributes(tracing.Attribute("image.ref", i.Name())) - if pullCtx.Unpack { - if unpacks == 0 { - // Try to unpack is none is done previously. - // This is at least required for schema 1 image. - if err := i.Unpack(ctx, pullCtx.Snapshotter, pullCtx.UnpackOpts...); err != nil { - return nil, fmt.Errorf("failed to unpack image on snapshotter %s: %w", pullCtx.Snapshotter, err) - } + if unpacker != nil && ur.Unpacks == 0 { + // Unpack was tried previously but nothing was unpacked + // This is at least required for schema 1 image. 
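// Caller-side sketch of the rewritten pull path above (socket path and image
// ref are placeholders, not taken from the patch): pulling with unpack enabled
// is what routes handlers through unpacker.Unpack and later unpacker.Wait.
package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "default")
	img, err := client.Pull(ctx, "docker.io/library/busybox:latest", containerd.WithPullUnpack)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("pulled", img.Name())
}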
+ if err := i.Unpack(ctx, pullCtx.Snapshotter, pullCtx.UnpackOpts...); err != nil { + return nil, fmt.Errorf("failed to unpack image on snapshotter %s: %w", pullCtx.Snapshotter, err) } } @@ -127,6 +173,8 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (_ Ima } func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, limit int) (images.Image, error) { + ctx, span := tracing.StartSpan(ctx, tracing.Name(pullSpanPrefix, "fetch")) + defer span.End() store := c.ContentStore() name, desc, err := rCtx.Resolver.Resolve(ctx, ref) if err != nil { @@ -230,6 +278,8 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim } func (c *Client) createNewImage(ctx context.Context, img images.Image) (images.Image, error) { + ctx, span := tracing.StartSpan(ctx, tracing.Name(pullSpanPrefix, "pull.createNewImage")) + defer span.End() is := c.ImageService() for { if created, err := is.Create(ctx, img); err != nil { diff --git a/vendor/github.com/containerd/containerd/reference/docker/helpers.go b/vendor/github.com/containerd/containerd/reference/docker/helpers.go new file mode 100644 index 0000000000000..386025104ae92 --- /dev/null +++ b/vendor/github.com/containerd/containerd/reference/docker/helpers.go @@ -0,0 +1,58 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import "path" + +// IsNameOnly returns true if reference only contains a repo name. +func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} + +// FamiliarName returns the familiar name string +// for the given named, familiarizing if needed. +func FamiliarName(ref Named) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().Name() + } + return ref.Name() +} + +// FamiliarString returns the familiar string representation +// for the given reference, familiarizing if needed. +func FamiliarString(ref Reference) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().String() + } + return ref.String() +} + +// FamiliarMatch reports whether ref matches the specified pattern. +// See https://godoc.org/path#Match for supported patterns. +func FamiliarMatch(pattern string, ref Reference) (bool, error) { + matched, err := path.Match(pattern, FamiliarString(ref)) + if namedRef, isNamed := ref.(Named); isNamed && !matched { + matched, _ = path.Match(pattern, FamiliarName(namedRef)) + } + return matched, err +} diff --git a/vendor/github.com/containerd/containerd/reference/docker/normalize.go b/vendor/github.com/containerd/containerd/reference/docker/normalize.go new file mode 100644 index 0000000000000..b299bf6c0686e --- /dev/null +++ b/vendor/github.com/containerd/containerd/reference/docker/normalize.go @@ -0,0 +1,196 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +var ( + legacyDefaultDomain = "index.docker.io" + defaultDomain = "docker.io" + officialRepoName = "library" + defaultTag = "latest" +) + +// normalizedNamed represents a name which has been +// normalized and has a familiar form. A familiar name +// is what is used in Docker UI. An example normalized +// name is "docker.io/library/ubuntu" and corresponding +// familiar name of "ubuntu". +type normalizedNamed interface { + Named + Familiar() Named +} + +// ParseNormalizedNamed parses a string into a named reference +// transforming a familiar name from Docker UI to a fully +// qualified reference. If the value may be an identifier +// use ParseAnyReference. +func ParseNormalizedNamed(s string) (Named, error) { + if ok := anchoredIdentifierRegexp.MatchString(s); ok { + return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) + } + domain, remainder := splitDockerDomain(s) + var remoteName string + if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { + remoteName = remainder[:tagSep] + } else { + remoteName = remainder + } + if strings.ToLower(remoteName) != remoteName { + return nil, fmt.Errorf("invalid reference format: repository name (%s) must be lowercase", remoteName) + } + + ref, err := Parse(domain + "/" + remainder) + if err != nil { + return nil, err + } + named, isNamed := ref.(Named) + if !isNamed { + return nil, fmt.Errorf("reference %s has no name", ref.String()) + } + return named, nil +} + +// ParseDockerRef normalizes the image reference following the docker convention. This is added +// mainly for backward compatibility. +// The reference returned can only be either tagged or digested. For reference contains both tag +// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@ +// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as +// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa. +func ParseDockerRef(ref string) (Named, error) { + named, err := ParseNormalizedNamed(ref) + if err != nil { + return nil, err + } + if _, ok := named.(NamedTagged); ok { + if canonical, ok := named.(Canonical); ok { + // The reference is both tagged and digested, only + // return digested. + newNamed, err := WithName(canonical.Name()) + if err != nil { + return nil, err + } + newCanonical, err := WithDigest(newNamed, canonical.Digest()) + if err != nil { + return nil, err + } + return newCanonical, nil + } + } + return TagNameOnly(named), nil +} + +// splitDockerDomain splits a repository name to domain and remotename string. +// If no valid domain is found, the default domain is used. Repository name +// needs to be already validated before. 
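// Small worked example for the normalization helpers above (illustrative only;
// the image name is arbitrary):
package main

import (
	"fmt"

	"github.com/containerd/containerd/reference/docker"
)

func main() {
	// A bare name picks up the default domain, the "library/" prefix and the
	// "latest" tag; FamiliarString undoes that for display.
	named, err := docker.ParseDockerRef("ubuntu")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String())               // docker.io/library/ubuntu:latest
	fmt.Println(docker.FamiliarString(named)) // ubuntu:latest
}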
+func splitDockerDomain(name string) (domain, remainder string) { + i := strings.IndexRune(name, '/') + if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost" && strings.ToLower(name[:i]) == name[:i]) { + domain, remainder = defaultDomain, name + } else { + domain, remainder = name[:i], name[i+1:] + } + if domain == legacyDefaultDomain { + domain = defaultDomain + } + if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { + remainder = officialRepoName + "/" + remainder + } + return +} + +// familiarizeName returns a shortened version of the name familiar +// to the Docker UI. Familiar names have the default domain +// "docker.io" and "library/" repository prefix removed. +// For example, "docker.io/library/redis" will have the familiar +// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". +// Returns a familiarized named only reference. +func familiarizeName(named namedRepository) repository { + repo := repository{ + domain: named.Domain(), + path: named.Path(), + } + + if repo.domain == defaultDomain { + repo.domain = "" + // Handle official repositories which have the pattern "library/" + if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { + repo.path = split[1] + } + } + return repo +} + +func (r reference) Familiar() Named { + return reference{ + namedRepository: familiarizeName(r.namedRepository), + tag: r.tag, + digest: r.digest, + } +} + +func (r repository) Familiar() Named { + return familiarizeName(r) +} + +func (t taggedReference) Familiar() Named { + return taggedReference{ + namedRepository: familiarizeName(t.namedRepository), + tag: t.tag, + } +} + +func (c canonicalReference) Familiar() Named { + return canonicalReference{ + namedRepository: familiarizeName(c.namedRepository), + digest: c.digest, + } +} + +// TagNameOnly adds the default tag "latest" to a reference if it only has +// a repo name. +func TagNameOnly(ref Named) Named { + if IsNameOnly(ref) { + namedTagged, err := WithTag(ref, defaultTag) + if err != nil { + // Default tag must be valid, to create a NamedTagged + // type with non-validated input the WithTag function + // should be used instead + panic(err) + } + return namedTagged + } + return ref +} + +// ParseAnyReference parses a reference string as a possible identifier, +// full digest, or familiar name. +func ParseAnyReference(ref string) (Reference, error) { + if ok := anchoredIdentifierRegexp.MatchString(ref); ok { + return digestReference("sha256:" + ref), nil + } + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + + return ParseNormalizedNamed(ref) +} diff --git a/vendor/github.com/containerd/containerd/reference/docker/reference.go b/vendor/github.com/containerd/containerd/reference/docker/reference.go index 25436b645550d..4dc00474ee5ef 100644 --- a/vendor/github.com/containerd/containerd/reference/docker/reference.go +++ b/vendor/github.com/containerd/containerd/reference/docker/reference.go @@ -21,7 +21,9 @@ // // reference := name [ ":" tag ] [ "@" digest ] // name := [domain '/'] path-component ['/' path-component]* -// domain := domain-component ['.' domain-component]* [':' port-number] +// domain := host [':' port-number] +// host := domain-name | IPv4address | \[ IPv6address \] ; rfc3986 appendix-A +// domain-name := domain-component ['.' 
domain-component]* // domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ // port-number := /[0-9]+/ // path-component := alpha-numeric [separator alpha-numeric]* @@ -43,8 +45,6 @@ package docker import ( "errors" "fmt" - "path" - "regexp" "strings" "github.com/opencontainers/go-digest" @@ -451,349 +451,3 @@ func (c canonicalReference) String() string { func (c canonicalReference) Digest() digest.Digest { return c.digest } - -var ( - // alphaNumericRegexp defines the alpha numeric atom, typically a - // component of names. This only allows lower case characters and digits. - alphaNumericRegexp = match(`[a-z0-9]+`) - - // separatorRegexp defines the separators allowed to be embedded in name - // components. This allow one period, one or two underscore and multiple - // dashes. - separatorRegexp = match(`(?:[._]|__|[-]*)`) - - // nameComponentRegexp restricts registry path component names to start - // with at least one letter or number, with following parts able to be - // separated by one period, one or two underscore and multiple dashes. - nameComponentRegexp = expression( - alphaNumericRegexp, - optional(repeated(separatorRegexp, alphaNumericRegexp))) - - // domainComponentRegexp restricts the registry domain component of a - // repository name to start with a component as defined by DomainRegexp - // and followed by an optional port. - domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) - - // DomainRegexp defines the structure of potential domain components - // that may be part of image names. This is purposely a subset of what is - // allowed by DNS to ensure backwards compatibility with Docker image - // names. - DomainRegexp = expression( - domainComponentRegexp, - optional(repeated(literal(`.`), domainComponentRegexp)), - optional(literal(`:`), match(`[0-9]+`))) - - // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. - TagRegexp = match(`[\w][\w.-]{0,127}`) - - // anchoredTagRegexp matches valid tag names, anchored at the start and - // end of the matched string. - anchoredTagRegexp = anchored(TagRegexp) - - // DigestRegexp matches valid digests. - DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) - - // anchoredDigestRegexp matches valid digests, anchored at the start and - // end of the matched string. - anchoredDigestRegexp = anchored(DigestRegexp) - - // NameRegexp is the format for the name component of references. The - // regexp has capturing groups for the domain and name part omitting - // the separating forward slash from either. - NameRegexp = expression( - optional(DomainRegexp, literal(`/`)), - nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp))) - - // anchoredNameRegexp is used to parse a name value, capturing the - // domain and trailing components. - anchoredNameRegexp = anchored( - optional(capture(DomainRegexp), literal(`/`)), - capture(nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp)))) - - // ReferenceRegexp is the full supported format of a reference. The regexp - // is anchored and has capturing groups for name, tag, and digest - // components. - ReferenceRegexp = anchored(capture(NameRegexp), - optional(literal(":"), capture(TagRegexp)), - optional(literal("@"), capture(DigestRegexp))) - - // IdentifierRegexp is the format for string identifier used as a - // content addressable identifier using sha256. These identifiers - // are like digests without the algorithm, since sha256 is used. 
- IdentifierRegexp = match(`([a-f0-9]{64})`) - - // ShortIdentifierRegexp is the format used to represent a prefix - // of an identifier. A prefix may be used to match a sha256 identifier - // within a list of trusted identifiers. - ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) - - // anchoredIdentifierRegexp is used to check or match an - // identifier value, anchored at start and end of string. - anchoredIdentifierRegexp = anchored(IdentifierRegexp) -) - -// match compiles the string to a regular expression. -var match = regexp.MustCompile - -// literal compiles s into a literal regular expression, escaping any regexp -// reserved characters. -func literal(s string) *regexp.Regexp { - re := match(regexp.QuoteMeta(s)) - - if _, complete := re.LiteralPrefix(); !complete { - panic("must be a literal") - } - - return re -} - -// expression defines a full expression, where each regular expression must -// follow the previous. -func expression(res ...*regexp.Regexp) *regexp.Regexp { - var s string - for _, re := range res { - s += re.String() - } - - return match(s) -} - -// optional wraps the expression in a non-capturing group and makes the -// production optional. -func optional(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `?`) -} - -// repeated wraps the regexp in a non-capturing group to get one or more -// matches. -func repeated(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `+`) -} - -// group wraps the regexp in a non-capturing group. -func group(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(?:` + expression(res...).String() + `)`) -} - -// capture wraps the expression in a capturing group. -func capture(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(` + expression(res...).String() + `)`) -} - -// anchored anchors the regular expression by adding start and end delimiters. -func anchored(res ...*regexp.Regexp) *regexp.Regexp { - return match(`^` + expression(res...).String() + `$`) -} - -var ( - legacyDefaultDomain = "index.docker.io" - defaultDomain = "docker.io" - officialRepoName = "library" - defaultTag = "latest" -) - -// normalizedNamed represents a name which has been -// normalized and has a familiar form. A familiar name -// is what is used in Docker UI. An example normalized -// name is "docker.io/library/ubuntu" and corresponding -// familiar name of "ubuntu". -type normalizedNamed interface { - Named - Familiar() Named -} - -// ParseNormalizedNamed parses a string into a named reference -// transforming a familiar name from Docker UI to a fully -// qualified reference. If the value may be an identifier -// use ParseAnyReference. 
-func ParseNormalizedNamed(s string) (Named, error) { - if ok := anchoredIdentifierRegexp.MatchString(s); ok { - return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) - } - domain, remainder := splitDockerDomain(s) - var remoteName string - if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { - remoteName = remainder[:tagSep] - } else { - remoteName = remainder - } - if strings.ToLower(remoteName) != remoteName { - return nil, errors.New("invalid reference format: repository name must be lowercase") - } - - ref, err := Parse(domain + "/" + remainder) - if err != nil { - return nil, err - } - named, isNamed := ref.(Named) - if !isNamed { - return nil, fmt.Errorf("reference %s has no name", ref.String()) - } - return named, nil -} - -// ParseDockerRef normalizes the image reference following the docker convention. This is added -// mainly for backward compatibility. -// The reference returned can only be either tagged or digested. For reference contains both tag -// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@ -// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as -// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa. -func ParseDockerRef(ref string) (Named, error) { - named, err := ParseNormalizedNamed(ref) - if err != nil { - return nil, err - } - if _, ok := named.(NamedTagged); ok { - if canonical, ok := named.(Canonical); ok { - // The reference is both tagged and digested, only - // return digested. - newNamed, err := WithName(canonical.Name()) - if err != nil { - return nil, err - } - newCanonical, err := WithDigest(newNamed, canonical.Digest()) - if err != nil { - return nil, err - } - return newCanonical, nil - } - } - return TagNameOnly(named), nil -} - -// splitDockerDomain splits a repository name to domain and remotename string. -// If no valid domain is found, the default domain is used. Repository name -// needs to be already validated before. -func splitDockerDomain(name string) (domain, remainder string) { - i := strings.IndexRune(name, '/') - if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { - domain, remainder = defaultDomain, name - } else { - domain, remainder = name[:i], name[i+1:] - } - if domain == legacyDefaultDomain { - domain = defaultDomain - } - if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { - remainder = officialRepoName + "/" + remainder - } - return -} - -// familiarizeName returns a shortened version of the name familiar -// to to the Docker UI. Familiar names have the default domain -// "docker.io" and "library/" repository prefix removed. -// For example, "docker.io/library/redis" will have the familiar -// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". -// Returns a familiarized named only reference. 
-func familiarizeName(named namedRepository) repository { - repo := repository{ - domain: named.Domain(), - path: named.Path(), - } - - if repo.domain == defaultDomain { - repo.domain = "" - // Handle official repositories which have the pattern "library/" - if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { - repo.path = split[1] - } - } - return repo -} - -func (r reference) Familiar() Named { - return reference{ - namedRepository: familiarizeName(r.namedRepository), - tag: r.tag, - digest: r.digest, - } -} - -func (r repository) Familiar() Named { - return familiarizeName(r) -} - -func (t taggedReference) Familiar() Named { - return taggedReference{ - namedRepository: familiarizeName(t.namedRepository), - tag: t.tag, - } -} - -func (c canonicalReference) Familiar() Named { - return canonicalReference{ - namedRepository: familiarizeName(c.namedRepository), - digest: c.digest, - } -} - -// TagNameOnly adds the default tag "latest" to a reference if it only has -// a repo name. -func TagNameOnly(ref Named) Named { - if IsNameOnly(ref) { - namedTagged, err := WithTag(ref, defaultTag) - if err != nil { - // Default tag must be valid, to create a NamedTagged - // type with non-validated input the WithTag function - // should be used instead - panic(err) - } - return namedTagged - } - return ref -} - -// ParseAnyReference parses a reference string as a possible identifier, -// full digest, or familiar name. -func ParseAnyReference(ref string) (Reference, error) { - if ok := anchoredIdentifierRegexp.MatchString(ref); ok { - return digestReference("sha256:" + ref), nil - } - if dgst, err := digest.Parse(ref); err == nil { - return digestReference(dgst), nil - } - - return ParseNormalizedNamed(ref) -} - -// IsNameOnly returns true if reference only contains a repo name. -func IsNameOnly(ref Named) bool { - if _, ok := ref.(NamedTagged); ok { - return false - } - if _, ok := ref.(Canonical); ok { - return false - } - return true -} - -// FamiliarName returns the familiar name string -// for the given named, familiarizing if needed. -func FamiliarName(ref Named) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().Name() - } - return ref.Name() -} - -// FamiliarString returns the familiar string representation -// for the given reference, familiarizing if needed. -func FamiliarString(ref Reference) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().String() - } - return ref.String() -} - -// FamiliarMatch reports whether ref matches the specified pattern. -// See https://godoc.org/path#Match for supported patterns. -func FamiliarMatch(pattern string, ref Reference) (bool, error) { - matched, err := path.Match(pattern, FamiliarString(ref)) - if namedRef, isNamed := ref.(Named); isNamed && !matched { - matched, _ = path.Match(pattern, FamiliarName(namedRef)) - } - return matched, err -} diff --git a/vendor/github.com/containerd/containerd/reference/docker/regexp.go b/vendor/github.com/containerd/containerd/reference/docker/regexp.go new file mode 100644 index 0000000000000..4be3c575e0495 --- /dev/null +++ b/vendor/github.com/containerd/containerd/reference/docker/regexp.go @@ -0,0 +1,191 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import "regexp" + +var ( + // alphaNumeric defines the alpha numeric atom, typically a + // component of names. This only allows lower case characters and digits. + alphaNumeric = `[a-z0-9]+` + + // separator defines the separators allowed to be embedded in name + // components. This allow one period, one or two underscore and multiple + // dashes. Repeated dashes and underscores are intentionally treated + // differently. In order to support valid hostnames as name components, + // supporting repeated dash was added. Additionally double underscore is + // now allowed as a separator to loosen the restriction for previously + // supported names. + separator = `(?:[._]|__|[-]*)` + + // nameComponent restricts registry path component names to start + // with at least one letter or number, with following parts able to be + // separated by one period, one or two underscore and multiple dashes. + nameComponent = expression( + alphaNumeric, + optional(repeated(separator, alphaNumeric))) + + // domainNameComponent restricts the registry domain component of a + // repository name to start with a component as defined by DomainRegexp. + domainNameComponent = `(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])` + + // ipv6address are enclosed between square brackets and may be represented + // in many ways, see rfc5952. Only IPv6 in compressed or uncompressed format + // are allowed, IPv6 zone identifiers (rfc6874) or Special addresses such as + // IPv4-Mapped are deliberately excluded. + ipv6address = expression( + literal(`[`), `(?:[a-fA-F0-9:]+)`, literal(`]`), + ) + + // domainName defines the structure of potential domain components + // that may be part of image names. This is purposely a subset of what is + // allowed by DNS to ensure backwards compatibility with Docker image + // names. This includes IPv4 addresses on decimal format. + domainName = expression( + domainNameComponent, + optional(repeated(literal(`.`), domainNameComponent)), + ) + + // host defines the structure of potential domains based on the URI + // Host subcomponent on rfc3986. It may be a subset of DNS domain name, + // or an IPv4 address in decimal format, or an IPv6 address between square + // brackets (excluding zone identifiers as defined by rfc6874 or special + // addresses such as IPv4-Mapped). + host = `(?:` + domainName + `|` + ipv6address + `)` + + // allowed by the URI Host subcomponent on rfc3986 to ensure backwards + // compatibility with Docker image names. + domain = expression( + host, + optional(literal(`:`), `[0-9]+`)) + + // DomainRegexp defines the structure of potential domain components + // that may be part of image names. This is purposely a subset of what is + // allowed by DNS to ensure backwards compatibility with Docker image + // names. + DomainRegexp = regexp.MustCompile(domain) + + tag = `[\w][\w.-]{0,127}` + // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. + TagRegexp = regexp.MustCompile(tag) + + anchoredTag = anchored(tag) + // anchoredTagRegexp matches valid tag names, anchored at the start and + // end of the matched string. 
+ anchoredTagRegexp = regexp.MustCompile(anchoredTag) + + digestPat = `[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}` + // DigestRegexp matches valid digests. + DigestRegexp = regexp.MustCompile(digestPat) + + anchoredDigest = anchored(digestPat) + // anchoredDigestRegexp matches valid digests, anchored at the start and + // end of the matched string. + anchoredDigestRegexp = regexp.MustCompile(anchoredDigest) + + namePat = expression( + optional(domain, literal(`/`)), + nameComponent, + optional(repeated(literal(`/`), nameComponent))) + // NameRegexp is the format for the name component of references. The + // regexp has capturing groups for the domain and name part omitting + // the separating forward slash from either. + NameRegexp = regexp.MustCompile(namePat) + + anchoredName = anchored( + optional(capture(domain), literal(`/`)), + capture(nameComponent, + optional(repeated(literal(`/`), nameComponent)))) + // anchoredNameRegexp is used to parse a name value, capturing the + // domain and trailing components. + anchoredNameRegexp = regexp.MustCompile(anchoredName) + + referencePat = anchored(capture(namePat), + optional(literal(":"), capture(tag)), + optional(literal("@"), capture(digestPat))) + // ReferenceRegexp is the full supported format of a reference. The regexp + // is anchored and has capturing groups for name, tag, and digest + // components. + ReferenceRegexp = regexp.MustCompile(referencePat) + + identifier = `([a-f0-9]{64})` + // IdentifierRegexp is the format for string identifier used as a + // content addressable identifier using sha256. These identifiers + // are like digests without the algorithm, since sha256 is used. + IdentifierRegexp = regexp.MustCompile(identifier) + + shortIdentifier = `([a-f0-9]{6,64})` + // ShortIdentifierRegexp is the format used to represent a prefix + // of an identifier. A prefix may be used to match a sha256 identifier + // within a list of trusted identifiers. + ShortIdentifierRegexp = regexp.MustCompile(shortIdentifier) + + anchoredIdentifier = anchored(identifier) + // anchoredIdentifierRegexp is used to check or match an + // identifier value, anchored at start and end of string. + anchoredIdentifierRegexp = regexp.MustCompile(anchoredIdentifier) +) + +// literal compiles s into a literal regular expression, escaping any regexp +// reserved characters. +func literal(s string) string { + re := regexp.MustCompile(regexp.QuoteMeta(s)) + + if _, complete := re.LiteralPrefix(); !complete { + panic("must be a literal") + } + + return re.String() +} + +// expression defines a full expression, where each regular expression must +// follow the previous. +func expression(res ...string) string { + var s string + for _, re := range res { + s += re + } + + return s +} + +// optional wraps the expression in a non-capturing group and makes the +// production optional. +func optional(res ...string) string { + return group(expression(res...)) + `?` +} + +// repeated wraps the regexp in a non-capturing group to get one or more +// matches. +func repeated(res ...string) string { + return group(expression(res...)) + `+` +} + +// group wraps the regexp in a non-capturing group. +func group(res ...string) string { + return `(?:` + expression(res...) + `)` +} + +// capture wraps the expression in a capturing group. +func capture(res ...string) string { + return `(` + expression(res...) + `)` +} + +// anchored anchors the regular expression by adding start and end delimiters. 
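// Quick illustration (not part of the vendored file) of what the rebuilt
// string-based patterns above accept; ReferenceRegexp captures name, tag and
// digest in that order.
package main

import (
	"fmt"

	"github.com/containerd/containerd/reference/docker"
)

func main() {
	m := docker.ReferenceRegexp.FindStringSubmatch("ghcr.io/foo/bar:v1.2.3")
	if m == nil {
		panic("reference did not match")
	}
	fmt.Println(m[1]) // ghcr.io/foo/bar
	fmt.Println(m[2]) // v1.2.3
	fmt.Println(m[3]) // empty, no digest present
}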
+func anchored(res ...string) string { + return `^` + expression(res...) + `$` +} diff --git a/vendor/github.com/containerd/containerd/reference/docker/sort.go b/vendor/github.com/containerd/containerd/reference/docker/sort.go new file mode 100644 index 0000000000000..984e37528d2a2 --- /dev/null +++ b/vendor/github.com/containerd/containerd/reference/docker/sort.go @@ -0,0 +1,73 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "sort" +) + +// Sort sorts string references preferring higher information references +// The precedence is as follows: +// 1. Name + Tag + Digest +// 2. Name + Tag +// 3. Name + Digest +// 4. Name +// 5. Digest +// 6. Parse error +func Sort(references []string) []string { + var prefs []Reference + var bad []string + + for _, ref := range references { + pref, err := ParseAnyReference(ref) + if err != nil { + bad = append(bad, ref) + } else { + prefs = append(prefs, pref) + } + } + sort.Slice(prefs, func(a, b int) bool { + ar := refRank(prefs[a]) + br := refRank(prefs[b]) + if ar == br { + return prefs[a].String() < prefs[b].String() + } + return ar < br + }) + sort.Strings(bad) + var refs []string + for _, pref := range prefs { + refs = append(refs, pref.String()) + } + return append(refs, bad...) +} + +func refRank(ref Reference) uint8 { + if _, ok := ref.(Named); ok { + if _, ok = ref.(Tagged); ok { + if _, ok = ref.(Digested); ok { + return 1 + } + return 2 + } + if _, ok = ref.(Digested); ok { + return 3 + } + return 4 + } + return 5 +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go b/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go index c259873d2a2ea..64c6a38f91a92 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go @@ -29,7 +29,6 @@ import ( "github.com/containerd/containerd/log" remoteserrors "github.com/containerd/containerd/remotes/errors" "github.com/containerd/containerd/version" - "golang.org/x/net/context/ctxhttp" ) var ( @@ -115,7 +114,7 @@ func FetchTokenWithOAuth(ctx context.Context, client *http.Client, headers http. form.Set("access_type", "offline") } - req, err := http.NewRequest("POST", to.Realm, strings.NewReader(form.Encode())) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, to.Realm, strings.NewReader(form.Encode())) if err != nil { return nil, err } @@ -127,7 +126,7 @@ func FetchTokenWithOAuth(ctx context.Context, client *http.Client, headers http. 
req.Header.Set("User-Agent", "containerd/"+version.Version) } - resp, err := ctxhttp.Do(ctx, client, req) + resp, err := client.Do(req) if err != nil { return nil, err } @@ -162,7 +161,7 @@ type FetchTokenResponse struct { // FetchToken fetches a token using a GET request func FetchToken(ctx context.Context, client *http.Client, headers http.Header, to TokenOptions) (*FetchTokenResponse, error) { - req, err := http.NewRequest("GET", to.Realm, nil) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, to.Realm, nil) if err != nil { return nil, err } @@ -194,7 +193,7 @@ func FetchToken(ctx context.Context, client *http.Client, headers http.Header, t req.URL.RawQuery = reqParams.Encode() - resp, err := ctxhttp.Do(ctx, client, req) + resp, err := client.Do(req) if err != nil { return nil, err } diff --git a/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go b/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go index eaa0e5dbdbcd5..8fc823144ebf2 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go @@ -29,7 +29,6 @@ import ( "github.com/containerd/containerd/log" "github.com/containerd/containerd/remotes/docker/auth" remoteerrors "github.com/containerd/containerd/remotes/errors" - "github.com/sirupsen/logrus" ) type dockerAuthorizer struct { @@ -45,13 +44,6 @@ type dockerAuthorizer struct { onFetchRefreshToken OnFetchRefreshToken } -// NewAuthorizer creates a Docker authorizer using the provided function to -// get credentials for the token server or basic auth. -// Deprecated: Use NewDockerAuthorizer -func NewAuthorizer(client *http.Client, f func(string) (string, string, error)) Authorizer { - return NewDockerAuthorizer(WithAuthClient(client), WithAuthCreds(f)) -} - type authorizerConfig struct { credentials func(string) (string, string, error) client *http.Client @@ -319,7 +311,7 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (token, refreshToken st } return resp.Token, resp.RefreshToken, nil } - log.G(ctx).WithFields(logrus.Fields{ + log.G(ctx).WithFields(log.Fields{ "status": errStatus.Status, "body": string(errStatus.Body), }).Debugf("token request failed") diff --git a/vendor/github.com/containerd/containerd/remotes/docker/converter_fuzz.go b/vendor/github.com/containerd/containerd/remotes/docker/converter_fuzz.go new file mode 100644 index 0000000000000..9082053924b1a --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/converter_fuzz.go @@ -0,0 +1,55 @@ +//go:build gofuzz + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package docker + +import ( + "context" + "os" + + fuzz "github.com/AdaLogics/go-fuzz-headers" + "github.com/containerd/containerd/content/local" + "github.com/containerd/containerd/log" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" +) + +func FuzzConvertManifest(data []byte) int { + ctx := context.Background() + + // Do not log the message below + // level=warning msg="do nothing for media type: ..." + log.G(ctx).Logger.SetLevel(logrus.PanicLevel) + + f := fuzz.NewConsumer(data) + desc := ocispec.Descriptor{} + err := f.GenerateStruct(&desc) + if err != nil { + return 0 + } + tmpdir, err := os.MkdirTemp("", "fuzzing-") + if err != nil { + return 0 + } + cs, err := local.NewStore(tmpdir) + if err != nil { + return 0 + } + _, _ = ConvertManifest(ctx, cs, desc) + return 1 +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go index 11a75356e804e..ecf245933f7a6 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go @@ -29,6 +29,7 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" + digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -52,18 +53,17 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R return newHTTPReadSeeker(desc.Size, func(offset int64) (io.ReadCloser, error) { // firstly try fetch via external urls for _, us := range desc.URLs { - ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", us)) - u, err := url.Parse(us) if err != nil { - log.G(ctx).WithError(err).Debug("failed to parse") + log.G(ctx).WithError(err).Debugf("failed to parse %q", us) continue } if u.Scheme != "http" && u.Scheme != "https" { log.G(ctx).Debug("non-http(s) alternative url is unsupported") continue } - log.G(ctx).Debug("trying alternative url") + ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", u)) + log.G(ctx).Info("request") // Try this first, parse it host := RegistryHost{ @@ -151,8 +151,106 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R }) } +func (r dockerFetcher) createGetReq(ctx context.Context, host RegistryHost, ps ...string) (*request, int64, error) { + headReq := r.request(host, http.MethodHead, ps...) + if err := headReq.addNamespace(r.refspec.Hostname()); err != nil { + return nil, 0, err + } + + headResp, err := headReq.doWithRetries(ctx, nil) + if err != nil { + return nil, 0, err + } + if headResp.Body != nil { + headResp.Body.Close() + } + if headResp.StatusCode > 299 { + return nil, 0, fmt.Errorf("unexpected HEAD status code %v: %s", headReq.String(), headResp.Status) + } + + getReq := r.request(host, http.MethodGet, ps...) 
+ if err := getReq.addNamespace(r.refspec.Hostname()); err != nil { + return nil, 0, err + } + return getReq, headResp.ContentLength, nil +} + +func (r dockerFetcher) FetchByDigest(ctx context.Context, dgst digest.Digest) (io.ReadCloser, ocispec.Descriptor, error) { + var desc ocispec.Descriptor + ctx = log.WithLogger(ctx, log.G(ctx).WithField("digest", dgst)) + + hosts := r.filterHosts(HostCapabilityPull) + if len(hosts) == 0 { + return nil, desc, fmt.Errorf("no pull hosts: %w", errdefs.ErrNotFound) + } + + ctx, err := ContextWithRepositoryScope(ctx, r.refspec, false) + if err != nil { + return nil, desc, err + } + + var ( + getReq *request + sz int64 + firstErr error + ) + + for _, host := range r.hosts { + getReq, sz, err = r.createGetReq(ctx, host, "blobs", dgst.String()) + if err == nil { + break + } + // Store the error for referencing later + if firstErr == nil { + firstErr = err + } + } + + if getReq == nil { + // Fall back to the "manifests" endpoint + for _, host := range r.hosts { + getReq, sz, err = r.createGetReq(ctx, host, "manifests", dgst.String()) + if err == nil { + break + } + // Store the error for referencing later + if firstErr == nil { + firstErr = err + } + } + } + + if getReq == nil { + if errdefs.IsNotFound(firstErr) { + firstErr = fmt.Errorf("could not fetch content %v from remote: %w", dgst, errdefs.ErrNotFound) + } + if firstErr == nil { + firstErr = fmt.Errorf("could not fetch content %v from remote: (unknown)", dgst) + } + return nil, desc, firstErr + } + + seeker, err := newHTTPReadSeeker(sz, func(offset int64) (io.ReadCloser, error) { + return r.open(ctx, getReq, "", offset) + }) + if err != nil { + return nil, desc, err + } + + desc = ocispec.Descriptor{ + MediaType: "application/octet-stream", + Digest: dgst, + Size: sz, + } + return seeker, desc, nil +} + func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string, offset int64) (_ io.ReadCloser, retErr error) { - req.header.Set("Accept", strings.Join([]string{mediatype, `*/*`}, ", ")) + if mediatype == "" { + req.header.Set("Accept", "*/*") + } else { + req.header.Set("Accept", strings.Join([]string{mediatype, `*/*`}, ", ")) + } if offset > 0 { // Note: "Accept-Ranges: bytes" cannot be trusted as some endpoints diff --git a/vendor/github.com/containerd/containerd/remotes/docker/fetcher_fuzz.go b/vendor/github.com/containerd/containerd/remotes/docker/fetcher_fuzz.go new file mode 100644 index 0000000000000..b98886c5956e7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/fetcher_fuzz.go @@ -0,0 +1,81 @@ +//go:build gofuzz + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package docker + +import ( + "context" + "fmt" + "io" + "net/http" + "net/http/httptest" + "net/url" + + refDocker "github.com/containerd/containerd/reference/docker" +) + +func FuzzFetcher(data []byte) int { + dataLen := len(data) + if dataLen == 0 { + return -1 + } + + s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.Header().Set("content-range", fmt.Sprintf("bytes %d-%d/%d", 0, dataLen-1, dataLen)) + rw.Header().Set("content-length", fmt.Sprintf("%d", dataLen)) + rw.Write(data) + })) + defer s.Close() + + u, err := url.Parse(s.URL) + if err != nil { + return 0 + } + + f := dockerFetcher{&dockerBase{ + repository: "nonempty", + }} + host := RegistryHost{ + Client: s.Client(), + Host: u.Host, + Scheme: u.Scheme, + Path: u.Path, + } + + ctx := context.Background() + req := f.request(host, http.MethodGet) + rc, err := f.open(ctx, req, "", 0) + if err != nil { + return 0 + } + b, err := io.ReadAll(rc) + if err != nil { + return 0 + } + + expected := data + if len(b) != len(expected) { + panic("len of request is not equal to len of expected but should be") + } + return 1 +} + +func FuzzParseDockerRef(data []byte) int { + _, _ = refDocker.ParseDockerRef(string(data)) + return 1 +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go index bef77fa61d56c..ef6e8056a0dde 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go @@ -191,6 +191,9 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str if resp == nil { resp, err = req.doWithRetries(ctx, nil) if err != nil { + if errors.Is(err, ErrInvalidAuthorization) { + return nil, fmt.Errorf("push access denied, repository does not exist or may require authorization: %w", err) + } return nil, err } } diff --git a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go index 709fa028de278..48737413b5ceb 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go @@ -32,12 +32,12 @@ import ( "github.com/containerd/containerd/log" "github.com/containerd/containerd/reference" "github.com/containerd/containerd/remotes" - "github.com/containerd/containerd/remotes/docker/schema1" + "github.com/containerd/containerd/remotes/docker/schema1" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. + remoteerrors "github.com/containerd/containerd/remotes/errors" + "github.com/containerd/containerd/tracing" "github.com/containerd/containerd/version" - digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" - "golang.org/x/net/context/ctxhttp" ) var ( @@ -70,6 +70,9 @@ type Authorizer interface { // unmodified. It may also add an `Authorization` header as // "bearer " // "basic " + // + // It may return remotes/errors.ErrUnexpectedStatus, which for example, + // can be used by the caller to find out the status code returned by the registry. 
Authorize(context.Context, *http.Request) error // AddResponses adds a 401 response for the authorizer to consider when @@ -152,7 +155,8 @@ func NewResolver(options ResolverOptions) remotes.Resolver { images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageManifest, - ocispec.MediaTypeImageIndex, "*/*"}, ", ")) + ocispec.MediaTypeImageIndex, "*/*", + }, ", ")) } else { resolveHeader["Accept"] = options.Headers["Accept"] delete(options.Headers, "Accept") @@ -299,11 +303,11 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp if resp.StatusCode > 399 { // Set firstErr when encountering the first non-404 status code. if firstErr == nil { - firstErr = fmt.Errorf("pulling from host %s failed with status code %v: %v", host.Host, u, resp.Status) + firstErr = remoteerrors.NewUnexpectedStatusErr(resp) } continue // try another host } - return "", ocispec.Descriptor{}, fmt.Errorf("pulling from host %s failed with unexpected status code %v: %v", host.Host, u, resp.Status) + return "", ocispec.Descriptor{}, remoteerrors.NewUnexpectedStatusErr(resp) } size := resp.ContentLength contentType := getManifestMediaType(resp) @@ -340,26 +344,31 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp if err != nil { return "", ocispec.Descriptor{}, err } - defer resp.Body.Close() bodyReader := countingReader{reader: resp.Body} contentType = getManifestMediaType(resp) - if dgst == "" { + err = func() error { + defer resp.Body.Close() + if dgst != "" { + _, err = io.Copy(io.Discard, &bodyReader) + return err + } + if contentType == images.MediaTypeDockerSchema1Manifest { b, err := schema1.ReadStripSignature(&bodyReader) if err != nil { - return "", ocispec.Descriptor{}, err + return err } dgst = digest.FromBytes(b) - } else { - dgst, err = digest.FromReader(&bodyReader) - if err != nil { - return "", ocispec.Descriptor{}, err - } + return nil } - } else if _, err := io.Copy(io.Discard, &bodyReader); err != nil { + + dgst, err = digest.FromReader(&bodyReader) + return err + }() + if err != nil { return "", ocispec.Descriptor{}, err } size = bodyReader.bytesRead @@ -525,7 +534,7 @@ type request struct { func (r *request) do(ctx context.Context) (*http.Response, error) { u := r.host.Scheme + "://" + r.host.Host + r.path - req, err := http.NewRequest(r.method, u, nil) + req, err := http.NewRequestWithContext(ctx, r.method, u, nil) if err != nil { return nil, err } @@ -551,7 +560,7 @@ func (r *request) do(ctx context.Context) (*http.Response, error) { return nil, fmt.Errorf("failed to authorize: %w", err) } - var client = &http.Client{} + client := &http.Client{} if r.host.Client != nil { *client = *r.host.Client } @@ -566,11 +575,18 @@ func (r *request) do(ctx context.Context) (*http.Response, error) { return nil } } - - resp, err := ctxhttp.Do(ctx, client, req) + _, httpSpan := tracing.StartSpan( + ctx, + tracing.Name("remotes.docker.resolver", "HTTPRequest"), + tracing.WithHTTPRequest(req), + ) + defer httpSpan.End() + resp, err := client.Do(req) if err != nil { + httpSpan.SetStatus(err) return nil, fmt.Errorf("failed to do request: %w", err) } + httpSpan.SetAttributes(tracing.HTTPStatusCodeAttributes(resp.StatusCode)...) 
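Editorial note: with Resolve now returning remoteerrors.NewUnexpectedStatusErr(resp) instead of an ad-hoc fmt.Errorf, callers can inspect the registry's response programmatically rather than by string matching, as the Authorizer comment above suggests. A hedged sketch of that call pattern; the wrapper name is hypothetical, and only fields that appear in the vendored errors package in this patch are used:

```go
package example

import (
	"context"
	"errors"
	"fmt"

	"github.com/containerd/containerd/remotes"
	remoteerrors "github.com/containerd/containerd/remotes/errors"
)

// resolveWithDiagnostics shows errors.As recovering the typed error, whether or
// not it has been wrapped further with %w on the way up.
func resolveWithDiagnostics(ctx context.Context, resolver remotes.Resolver, ref string) error {
	_, _, err := resolver.Resolve(ctx, ref)
	if err == nil {
		return nil
	}
	var unexpected remoteerrors.ErrUnexpectedStatus
	if errors.As(err, &unexpected) {
		// Status, RequestMethod and RequestURL are the fields the patched
		// Error() string is built from; inspect them instead of parsing the message.
		return fmt.Errorf("registry answered %q to %s %s: %w",
			unexpected.Status, unexpected.RequestMethod, unexpected.RequestURL, err)
	}
	return err
}
```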
log.G(ctx).WithFields(responseFields(resp)).Debug("fetch response received") return resp, nil } @@ -630,7 +646,7 @@ func (r *request) String() string { return r.host.Scheme + "://" + r.host.Host + r.path } -func requestFields(req *http.Request) logrus.Fields { +func requestFields(req *http.Request) log.Fields { fields := map[string]interface{}{ "request.method": req.Method, } @@ -648,10 +664,10 @@ func requestFields(req *http.Request) logrus.Fields { } } - return logrus.Fields(fields) + return log.Fields(fields) } -func responseFields(resp *http.Response) logrus.Fields { +func responseFields(resp *http.Response) log.Fields { fields := map[string]interface{}{ "response.status": resp.Status, } @@ -666,7 +682,7 @@ func responseFields(resp *http.Response) logrus.Fields { } } - return logrus.Fields(fields) + return log.Fields(fields) } // IsLocalhost checks if the registry host is local. diff --git a/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go b/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go index efa4e8d6eeb3f..8c9e520cd27e5 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go @@ -14,6 +14,9 @@ limitations under the License. */ +// Package schema1 provides a converter to fetch an image formatted in Docker Image Manifest v2, Schema 1. +// +// Deprecated: use images formatted in Docker Image Manifest v2, Schema 2, or OCI Image Spec v1. package schema1 import ( @@ -33,6 +36,7 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" + "github.com/containerd/containerd/labels" "github.com/containerd/containerd/log" "github.com/containerd/containerd/remotes" digest "github.com/opencontainers/go-digest" @@ -363,12 +367,12 @@ func (c *Converter) fetchBlob(ctx context.Context, desc ocispec.Descriptor) erro cinfo := content.Info{ Digest: desc.Digest, Labels: map[string]string{ - "containerd.io/uncompressed": state.diffID.String(), + labels.LabelUncompressed: state.diffID.String(), labelDockerSchema1EmptyLayer: strconv.FormatBool(state.empty), }, } - if _, err := c.contentStore.Update(ctx, cinfo, "labels.containerd.io/uncompressed", fmt.Sprintf("labels.%s", labelDockerSchema1EmptyLayer)); err != nil { + if _, err := c.contentStore.Update(ctx, cinfo, "labels."+labels.LabelUncompressed, fmt.Sprintf("labels.%s", labelDockerSchema1EmptyLayer)); err != nil { return fmt.Errorf("failed to update uncompressed label: %w", err) } @@ -387,7 +391,7 @@ func (c *Converter) reuseLabelBlobState(ctx context.Context, desc ocispec.Descri } desc.Size = cinfo.Size - diffID, ok := cinfo.Labels["containerd.io/uncompressed"] + diffID, ok := cinfo.Labels[labels.LabelUncompressed] if !ok { return false, nil } @@ -406,7 +410,7 @@ func (c *Converter) reuseLabelBlobState(ctx context.Context, desc ocispec.Descri bState := blobState{empty: isEmpty} if bState.diffID, err = digest.Parse(diffID); err != nil { - log.G(ctx).WithField("id", desc.Digest).Warnf("failed to parse digest from label containerd.io/uncompressed: %v", diffID) + log.G(ctx).WithField("id", desc.Digest).Warnf("failed to parse digest from label %s: %v", labels.LabelUncompressed, diffID) return false, nil } diff --git a/vendor/github.com/containerd/containerd/remotes/errors/errors.go b/vendor/github.com/containerd/containerd/remotes/errors/errors.go index 67ccb23df6e88..f60ff0fc286c4 100644 --- 
a/vendor/github.com/containerd/containerd/remotes/errors/errors.go +++ b/vendor/github.com/containerd/containerd/remotes/errors/errors.go @@ -33,7 +33,7 @@ type ErrUnexpectedStatus struct { } func (e ErrUnexpectedStatus) Error() string { - return fmt.Sprintf("unexpected status: %s", e.Status) + return fmt.Sprintf("unexpected status from %s request to %s: %s", e.RequestMethod, e.RequestURL, e.Status) } // NewUnexpectedStatusErr creates an ErrUnexpectedStatus from HTTP response diff --git a/vendor/github.com/containerd/containerd/remotes/handlers.go b/vendor/github.com/containerd/containerd/remotes/handlers.go index 4d91ed2e54d0a..b2e1725572a58 100644 --- a/vendor/github.com/containerd/containerd/remotes/handlers.go +++ b/vendor/github.com/containerd/containerd/remotes/handlers.go @@ -17,6 +17,7 @@ package remotes import ( + "bytes" "context" "errors" "fmt" @@ -30,7 +31,6 @@ import ( "github.com/containerd/containerd/log" "github.com/containerd/containerd/platforms" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" "golang.org/x/sync/semaphore" ) @@ -90,7 +90,7 @@ func MakeRefKey(ctx context.Context, desc ocispec.Descriptor) string { // recursive fetch. func FetchHandler(ingester content.Ingester, fetcher Fetcher) images.HandlerFunc { return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) { - ctx = log.WithLogger(ctx, log.G(ctx).WithFields(logrus.Fields{ + ctx = log.WithLogger(ctx, log.G(ctx).WithFields(log.Fields{ "digest": desc.Digest, "mediatype": desc.MediaType, "size": desc.Size, @@ -100,20 +100,21 @@ func FetchHandler(ingester content.Ingester, fetcher Fetcher) images.HandlerFunc case images.MediaTypeDockerSchema1Manifest: return nil, fmt.Errorf("%v not supported", desc.MediaType) default: - err := fetch(ctx, ingester, fetcher, desc) + err := Fetch(ctx, ingester, fetcher, desc) + if errdefs.IsAlreadyExists(err) { + return nil, nil + } return nil, err } } } -func fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc ocispec.Descriptor) error { +// Fetch fetches the given digest into the provided ingester +func Fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc ocispec.Descriptor) error { log.G(ctx).Debug("fetch") cw, err := content.OpenWriter(ctx, ingester, content.WithRef(MakeRefKey(ctx, desc)), content.WithDescriptor(desc)) if err != nil { - if errdefs.IsAlreadyExists(err) { - return nil - } return err } defer cw.Close() @@ -135,7 +136,11 @@ func fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc if err != nil && !errdefs.IsAlreadyExists(err) { return fmt.Errorf("failed commit on ref %q: %w", ws.Ref, err) } - return nil + return err + } + + if desc.Size == int64(len(desc.Data)) { + return content.Copy(ctx, cw, bytes.NewReader(desc.Data), desc.Size, desc.Digest) } rc, err := fetcher.Fetch(ctx, desc) @@ -151,7 +156,7 @@ func fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc // using a writer from the pusher. 
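Editorial note: the previously unexported fetch helper becomes remotes.Fetch, and FetchHandler now maps an "already exists" result to success instead of swallowing it inside the helper. A compile-level sketch of how an external caller might use the exported function under the same convention (the wrapper name is hypothetical):

```go
package example

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/remotes"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// fetchIfMissing pulls a single descriptor into the local content store and,
// like the updated FetchHandler, treats "already exists" as success.
func fetchIfMissing(ctx context.Context, ingester content.Ingester, fetcher remotes.Fetcher, desc ocispec.Descriptor) error {
	err := remotes.Fetch(ctx, ingester, fetcher, desc)
	if errdefs.IsAlreadyExists(err) {
		return nil
	}
	return err
}
```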
func PushHandler(pusher Pusher, provider content.Provider) images.HandlerFunc { return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - ctx = log.WithLogger(ctx, log.G(ctx).WithFields(logrus.Fields{ + ctx = log.WithLogger(ctx, log.G(ctx).WithFields(log.Fields{ "digest": desc.Digest, "mediatype": desc.MediaType, "size": desc.Size, @@ -197,17 +202,25 @@ func push(ctx context.Context, provider content.Provider, pusher Pusher, desc oc // // Base handlers can be provided which will be called before any push specific // handlers. -func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, store content.Store, limiter *semaphore.Weighted, platform platforms.MatchComparer, wrapper func(h images.Handler) images.Handler) error { +// +// If the passed in content.Provider is also a content.Manager then this will +// also annotate the distribution sources in the manager. +func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, store content.Provider, limiter *semaphore.Weighted, platform platforms.MatchComparer, wrapper func(h images.Handler) images.Handler) error { var m sync.Mutex - manifestStack := []ocispec.Descriptor{} + manifests := []ocispec.Descriptor{} + indexStack := []ocispec.Descriptor{} filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { switch desc.MediaType { - case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest, - images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: m.Lock() - manifestStack = append(manifestStack, desc) + manifests = append(manifests, desc) + m.Unlock() + return nil, images.ErrStopHandler + case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + m.Lock() + indexStack = append(indexStack, desc) m.Unlock() return nil, images.ErrStopHandler default: @@ -219,13 +232,14 @@ func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, st platformFilterhandler := images.FilterPlatforms(images.ChildrenHandler(store), platform) - annotateHandler := annotateDistributionSourceHandler(platformFilterhandler, store) + var handler images.Handler + if m, ok := store.(content.Manager); ok { + annotateHandler := annotateDistributionSourceHandler(platformFilterhandler, m) + handler = images.Handlers(annotateHandler, filterHandler, pushHandler) + } else { + handler = images.Handlers(platformFilterhandler, filterHandler, pushHandler) + } - var handler images.Handler = images.Handlers( - annotateHandler, - filterHandler, - pushHandler, - ) if wrapper != nil { handler = wrapper(handler) } @@ -234,16 +248,18 @@ func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, st return err } + if err := images.Dispatch(ctx, pushHandler, limiter, manifests...); err != nil { + return err + } + // Iterate in reverse order as seen, parent always uploaded after child - for i := len(manifestStack) - 1; i >= 0; i-- { - _, err := pushHandler(ctx, manifestStack[i]) + for i := len(indexStack) - 1; i >= 0; i-- { + err := images.Dispatch(ctx, pushHandler, limiter, indexStack[i]) if err != nil { // TODO(estesp): until we have a more complete method for index push, we need to report // missing dependencies in an index/manifest list by sensing the "400 Bad Request" // as a marker for this problem - if (manifestStack[i].MediaType == ocispec.MediaTypeImageIndex || - manifestStack[i].MediaType == 
images.MediaTypeDockerSchema2ManifestList) && - errors.Unwrap(err) != nil && strings.Contains(errors.Unwrap(err).Error(), "400 Bad Request") { + if errors.Unwrap(err) != nil && strings.Contains(errors.Unwrap(err).Error(), "400 Bad Request") { return fmt.Errorf("manifest list/index references to blobs and/or manifests are missing in your target registry: %w", err) } return err diff --git a/vendor/github.com/containerd/containerd/remotes/resolver.go b/vendor/github.com/containerd/containerd/remotes/resolver.go index 624b14f05d6c3..f200c84bc7dad 100644 --- a/vendor/github.com/containerd/containerd/remotes/resolver.go +++ b/vendor/github.com/containerd/containerd/remotes/resolver.go @@ -21,6 +21,7 @@ import ( "io" "github.com/containerd/containerd/content" + "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -33,7 +34,7 @@ type Resolver interface { // reference a specific host or be matched against a specific handler. // // The returned name should be used to identify the referenced entity. - // Dependending on the remote namespace, this may be immutable or mutable. + // Depending on the remote namespace, this may be immutable or mutable. // While the name may differ from ref, it should itself be a valid ref. // // If the resolution fails, an error will be returned. @@ -50,12 +51,23 @@ type Resolver interface { Pusher(ctx context.Context, ref string) (Pusher, error) } -// Fetcher fetches content +// Fetcher fetches content. +// A fetcher implementation may implement the FetcherByDigest interface too. type Fetcher interface { // Fetch the resource identified by the descriptor. Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) } +// FetcherByDigest fetches content by the digest. +type FetcherByDigest interface { + // FetchByDigest fetches the resource identified by the digest. + // + // FetcherByDigest usually returns an incomplete descriptor. + // Typically, the media type is always set to "application/octet-stream", + // and the annotations are unset. + FetchByDigest(ctx context.Context, dgst digest.Digest) (io.ReadCloser, ocispec.Descriptor, error) +} + // Pusher pushes content type Pusher interface { // Push returns a content writer for the given resource identified diff --git a/vendor/github.com/containerd/containerd/rootfs/apply.go b/vendor/github.com/containerd/containerd/rootfs/apply.go index b3f388e64126a..35eae6d63de15 100644 --- a/vendor/github.com/containerd/containerd/rootfs/apply.go +++ b/vendor/github.com/containerd/containerd/rootfs/apply.go @@ -18,9 +18,9 @@ package rootfs import ( "context" + "crypto/rand" "encoding/base64" "fmt" - "math/rand" "time" "github.com/containerd/containerd/diff" diff --git a/vendor/github.com/containerd/containerd/rootfs/diff.go b/vendor/github.com/containerd/containerd/rootfs/diff.go index 226cebccf230c..da9dbe275291f 100644 --- a/vendor/github.com/containerd/containerd/rootfs/diff.go +++ b/vendor/github.com/containerd/containerd/rootfs/diff.go @@ -22,7 +22,7 @@ import ( "github.com/containerd/containerd/diff" "github.com/containerd/containerd/mount" - "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/pkg/cleanup" "github.com/containerd/containerd/snapshots" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -32,13 +32,6 @@ import ( // the content creation and the provided snapshotter and mount differ are used // for calculating the diff. The descriptor for the layer diff is returned. 
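Editorial note: the new remotes.FetcherByDigest interface above lets callers retrieve content when only a digest, and no full descriptor, is known; dockerFetcher provides the implementation added earlier in this patch. A hedged sketch of the expected call pattern (the helper name is illustrative):

```go
package example

import (
	"context"
	"fmt"
	"io"

	"github.com/containerd/containerd/remotes"
	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// openByDigest falls back gracefully when a Fetcher does not implement the
// optional FetcherByDigest interface. Per the interface docs, the returned
// descriptor is intentionally incomplete: media type "application/octet-stream",
// no annotations.
func openByDigest(ctx context.Context, f remotes.Fetcher, dgst digest.Digest) (io.ReadCloser, ocispec.Descriptor, error) {
	fbd, ok := f.(remotes.FetcherByDigest)
	if !ok {
		return nil, ocispec.Descriptor{}, fmt.Errorf("fetcher %T cannot fetch by digest alone", f)
	}
	return fbd.FetchByDigest(ctx, dgst)
}
```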
func CreateDiff(ctx context.Context, snapshotID string, sn snapshots.Snapshotter, d diff.Comparer, opts ...diff.Opt) (ocispec.Descriptor, error) { - // dctx is used to handle cleanup things just in case the param ctx - // has been canceled, which causes that the defer cleanup fails. - dctx := context.Background() - if ns, ok := namespaces.Namespace(ctx); ok { - dctx = namespaces.WithNamespace(dctx, ns) - } - info, err := sn.Stat(ctx, snapshotID) if err != nil { return ocispec.Descriptor{}, err @@ -49,7 +42,9 @@ func CreateDiff(ctx context.Context, snapshotID string, sn snapshots.Snapshotter if err != nil { return ocispec.Descriptor{}, err } - defer sn.Remove(dctx, lowerKey) + defer cleanup.Do(ctx, func(ctx context.Context) { + sn.Remove(ctx, lowerKey) + }) var upper []mount.Mount if info.Kind == snapshots.KindActive { @@ -63,7 +58,9 @@ func CreateDiff(ctx context.Context, snapshotID string, sn snapshots.Snapshotter if err != nil { return ocispec.Descriptor{}, err } - defer sn.Remove(dctx, upperKey) + defer cleanup.Do(ctx, func(ctx context.Context) { + sn.Remove(ctx, upperKey) + }) } return d.Compare(ctx, lower, upper, opts...) diff --git a/vendor/github.com/containerd/containerd/rootfs/init_other.go b/vendor/github.com/containerd/containerd/rootfs/init_other.go index d8e38d4c78b7c..049cff2880a24 100644 --- a/vendor/github.com/containerd/containerd/rootfs/init_other.go +++ b/vendor/github.com/containerd/containerd/rootfs/init_other.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/runtime/linux/runctypes/next.pb.txt b/vendor/github.com/containerd/containerd/runtime/linux/runctypes/next.pb.txt index cdf0e9ddcee6e..eb94415b42a1f 100644 --- a/vendor/github.com/containerd/containerd/runtime/linux/runctypes/next.pb.txt +++ b/vendor/github.com/containerd/containerd/runtime/linux/runctypes/next.pb.txt @@ -1,7 +1,6 @@ file { name: "github.com/containerd/containerd/runtime/linux/runctypes/runc.proto" package: "containerd.linux.runc" - dependency: "gogoproto/gogo.proto" message_type { name: "RuncOptions" field { @@ -23,6 +22,9 @@ file { number: 3 label: LABEL_OPTIONAL type: TYPE_STRING + options { + deprecated: true + } json_name: "criuPath" } field { @@ -206,6 +208,5 @@ file { options { go_package: "github.com/containerd/containerd/runtime/linux/runctypes;runctypes" } - weak_dependency: 0 syntax: "proto3" } diff --git a/vendor/github.com/containerd/containerd/runtime/linux/runctypes/runc.pb.go b/vendor/github.com/containerd/containerd/runtime/linux/runctypes/runc.pb.go index 46d31ff59a514..37f3329bce091 100644 --- a/vendor/github.com/containerd/containerd/runtime/linux/runctypes/runc.pb.go +++ b/vendor/github.com/containerd/containerd/runtime/linux/runctypes/runc.pb.go @@ -1,1811 +1,581 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/runtime/linux/runctypes/runc.proto package runctypes import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - strings "strings" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type RuncOptions struct { - Runtime string `protobuf:"bytes,1,opt,name=runtime,proto3" json:"runtime,omitempty"` - RuntimeRoot string `protobuf:"bytes,2,opt,name=runtime_root,json=runtimeRoot,proto3" json:"runtime_root,omitempty"` - CriuPath string `protobuf:"bytes,3,opt,name=criu_path,json=criuPath,proto3" json:"criu_path,omitempty"` - SystemdCgroup bool `protobuf:"varint,4,opt,name=systemd_cgroup,json=systemdCgroup,proto3" json:"systemd_cgroup,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *RuncOptions) Reset() { *m = RuncOptions{} } -func (*RuncOptions) ProtoMessage() {} -func (*RuncOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_d20e2ba8b3cc58b9, []int{0} + Runtime string `protobuf:"bytes,1,opt,name=runtime,proto3" json:"runtime,omitempty"` + RuntimeRoot string `protobuf:"bytes,2,opt,name=runtime_root,json=runtimeRoot,proto3" json:"runtime_root,omitempty"` + // criu binary path. + // + // Deprecated: runc option --criu is now ignored (with a warning), and the + // option will be removed entirely in a future release. Users who need a non- + // standard criu binary should rely on the standard way of looking up binaries + // in $PATH. + // + // Deprecated: Do not use. 
+ CriuPath string `protobuf:"bytes,3,opt,name=criu_path,json=criuPath,proto3" json:"criu_path,omitempty"` + SystemdCgroup bool `protobuf:"varint,4,opt,name=systemd_cgroup,json=systemdCgroup,proto3" json:"systemd_cgroup,omitempty"` } -func (m *RuncOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RuncOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RuncOptions.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil + +func (x *RuncOptions) Reset() { + *x = RuncOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *RuncOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RuncOptions.Merge(m, src) -} -func (m *RuncOptions) XXX_Size() int { - return m.Size() -} -func (m *RuncOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RuncOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_RuncOptions proto.InternalMessageInfo -type CreateOptions struct { - NoPivotRoot bool `protobuf:"varint,1,opt,name=no_pivot_root,json=noPivotRoot,proto3" json:"no_pivot_root,omitempty"` - OpenTcp bool `protobuf:"varint,2,opt,name=open_tcp,json=openTcp,proto3" json:"open_tcp,omitempty"` - ExternalUnixSockets bool `protobuf:"varint,3,opt,name=external_unix_sockets,json=externalUnixSockets,proto3" json:"external_unix_sockets,omitempty"` - Terminal bool `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"` - FileLocks bool `protobuf:"varint,5,opt,name=file_locks,json=fileLocks,proto3" json:"file_locks,omitempty"` - EmptyNamespaces []string `protobuf:"bytes,6,rep,name=empty_namespaces,json=emptyNamespaces,proto3" json:"empty_namespaces,omitempty"` - CgroupsMode string `protobuf:"bytes,7,opt,name=cgroups_mode,json=cgroupsMode,proto3" json:"cgroups_mode,omitempty"` - NoNewKeyring bool `protobuf:"varint,8,opt,name=no_new_keyring,json=noNewKeyring,proto3" json:"no_new_keyring,omitempty"` - ShimCgroup string `protobuf:"bytes,9,opt,name=shim_cgroup,json=shimCgroup,proto3" json:"shim_cgroup,omitempty"` - IoUid uint32 `protobuf:"varint,10,opt,name=io_uid,json=ioUid,proto3" json:"io_uid,omitempty"` - IoGid uint32 `protobuf:"varint,11,opt,name=io_gid,json=ioGid,proto3" json:"io_gid,omitempty"` - CriuWorkPath string `protobuf:"bytes,12,opt,name=criu_work_path,json=criuWorkPath,proto3" json:"criu_work_path,omitempty"` - CriuImagePath string `protobuf:"bytes,13,opt,name=criu_image_path,json=criuImagePath,proto3" json:"criu_image_path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *RuncOptions) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *CreateOptions) Reset() { *m = CreateOptions{} } -func (*CreateOptions) ProtoMessage() {} -func (*CreateOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_d20e2ba8b3cc58b9, []int{1} -} -func (m *CreateOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CreateOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CreateOptions.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*RuncOptions) ProtoMessage() {} + +func (x 
*RuncOptions) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *CreateOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateOptions.Merge(m, src) -} -func (m *CreateOptions) XXX_Size() int { - return m.Size() -} -func (m *CreateOptions) XXX_DiscardUnknown() { - xxx_messageInfo_CreateOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_CreateOptions proto.InternalMessageInfo -type CheckpointOptions struct { - Exit bool `protobuf:"varint,1,opt,name=exit,proto3" json:"exit,omitempty"` - OpenTcp bool `protobuf:"varint,2,opt,name=open_tcp,json=openTcp,proto3" json:"open_tcp,omitempty"` - ExternalUnixSockets bool `protobuf:"varint,3,opt,name=external_unix_sockets,json=externalUnixSockets,proto3" json:"external_unix_sockets,omitempty"` - Terminal bool `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"` - FileLocks bool `protobuf:"varint,5,opt,name=file_locks,json=fileLocks,proto3" json:"file_locks,omitempty"` - EmptyNamespaces []string `protobuf:"bytes,6,rep,name=empty_namespaces,json=emptyNamespaces,proto3" json:"empty_namespaces,omitempty"` - CgroupsMode string `protobuf:"bytes,7,opt,name=cgroups_mode,json=cgroupsMode,proto3" json:"cgroups_mode,omitempty"` - WorkPath string `protobuf:"bytes,8,opt,name=work_path,json=workPath,proto3" json:"work_path,omitempty"` - ImagePath string `protobuf:"bytes,9,opt,name=image_path,json=imagePath,proto3" json:"image_path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +// Deprecated: Use RuncOptions.ProtoReflect.Descriptor instead. +func (*RuncOptions) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDescGZIP(), []int{0} } -func (m *CheckpointOptions) Reset() { *m = CheckpointOptions{} } -func (*CheckpointOptions) ProtoMessage() {} -func (*CheckpointOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_d20e2ba8b3cc58b9, []int{2} -} -func (m *CheckpointOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CheckpointOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CheckpointOptions.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *RuncOptions) GetRuntime() string { + if x != nil { + return x.Runtime } + return "" } -func (m *CheckpointOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_CheckpointOptions.Merge(m, src) + +func (x *RuncOptions) GetRuntimeRoot() string { + if x != nil { + return x.RuntimeRoot + } + return "" } -func (m *CheckpointOptions) XXX_Size() int { - return m.Size() + +// Deprecated: Do not use. 
+func (x *RuncOptions) GetCriuPath() string { + if x != nil { + return x.CriuPath + } + return "" } -func (m *CheckpointOptions) XXX_DiscardUnknown() { - xxx_messageInfo_CheckpointOptions.DiscardUnknown(m) + +func (x *RuncOptions) GetSystemdCgroup() bool { + if x != nil { + return x.SystemdCgroup + } + return false } -var xxx_messageInfo_CheckpointOptions proto.InternalMessageInfo +type CreateOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -type ProcessDetails struct { - ExecID string `protobuf:"bytes,1,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + NoPivotRoot bool `protobuf:"varint,1,opt,name=no_pivot_root,json=noPivotRoot,proto3" json:"no_pivot_root,omitempty"` + OpenTcp bool `protobuf:"varint,2,opt,name=open_tcp,json=openTcp,proto3" json:"open_tcp,omitempty"` + ExternalUnixSockets bool `protobuf:"varint,3,opt,name=external_unix_sockets,json=externalUnixSockets,proto3" json:"external_unix_sockets,omitempty"` + Terminal bool `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"` + FileLocks bool `protobuf:"varint,5,opt,name=file_locks,json=fileLocks,proto3" json:"file_locks,omitempty"` + EmptyNamespaces []string `protobuf:"bytes,6,rep,name=empty_namespaces,json=emptyNamespaces,proto3" json:"empty_namespaces,omitempty"` + CgroupsMode string `protobuf:"bytes,7,opt,name=cgroups_mode,json=cgroupsMode,proto3" json:"cgroups_mode,omitempty"` + NoNewKeyring bool `protobuf:"varint,8,opt,name=no_new_keyring,json=noNewKeyring,proto3" json:"no_new_keyring,omitempty"` + ShimCgroup string `protobuf:"bytes,9,opt,name=shim_cgroup,json=shimCgroup,proto3" json:"shim_cgroup,omitempty"` + IoUid uint32 `protobuf:"varint,10,opt,name=io_uid,json=ioUid,proto3" json:"io_uid,omitempty"` + IoGid uint32 `protobuf:"varint,11,opt,name=io_gid,json=ioGid,proto3" json:"io_gid,omitempty"` + CriuWorkPath string `protobuf:"bytes,12,opt,name=criu_work_path,json=criuWorkPath,proto3" json:"criu_work_path,omitempty"` + CriuImagePath string `protobuf:"bytes,13,opt,name=criu_image_path,json=criuImagePath,proto3" json:"criu_image_path,omitempty"` } -func (m *ProcessDetails) Reset() { *m = ProcessDetails{} } -func (*ProcessDetails) ProtoMessage() {} -func (*ProcessDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_d20e2ba8b3cc58b9, []int{3} -} -func (m *ProcessDetails) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProcessDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProcessDetails.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *CreateOptions) Reset() { + *x = CreateOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *ProcessDetails) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProcessDetails.Merge(m, src) -} -func (m *ProcessDetails) XXX_Size() int { - return m.Size() -} -func (m *ProcessDetails) XXX_DiscardUnknown() { - xxx_messageInfo_ProcessDetails.DiscardUnknown(m) + +func (x *CreateOptions) String() string { + return protoimpl.X.MessageStringOf(x) } -var xxx_messageInfo_ProcessDetails proto.InternalMessageInfo 
+func (*CreateOptions) ProtoMessage() {} -func init() { - proto.RegisterType((*RuncOptions)(nil), "containerd.linux.runc.RuncOptions") - proto.RegisterType((*CreateOptions)(nil), "containerd.linux.runc.CreateOptions") - proto.RegisterType((*CheckpointOptions)(nil), "containerd.linux.runc.CheckpointOptions") - proto.RegisterType((*ProcessDetails)(nil), "containerd.linux.runc.ProcessDetails") +func (x *CreateOptions) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -func init() { - proto.RegisterFile("github.com/containerd/containerd/runtime/linux/runctypes/runc.proto", fileDescriptor_d20e2ba8b3cc58b9) +// Deprecated: Use CreateOptions.ProtoReflect.Descriptor instead. +func (*CreateOptions) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDescGZIP(), []int{1} } -var fileDescriptor_d20e2ba8b3cc58b9 = []byte{ - // 604 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x94, 0xcf, 0x6e, 0xd3, 0x40, - 0x10, 0xc6, 0xeb, 0xfe, 0x49, 0x9c, 0x49, 0xd2, 0xc2, 0x42, 0x25, 0xd3, 0xaa, 0x69, 0x08, 0x7f, - 0x14, 0x2e, 0xa9, 0x04, 0xe2, 0xc4, 0xad, 0x29, 0x42, 0x15, 0x50, 0x2a, 0x43, 0x05, 0x42, 0x48, - 0x2b, 0x77, 0x3d, 0x24, 0xab, 0xc4, 0x3b, 0x96, 0x77, 0x4d, 0x92, 0x1b, 0x4f, 0xc0, 0x0b, 0xf1, - 0x02, 0x3d, 0x21, 0x8e, 0x9c, 0x10, 0xcd, 0x93, 0xa0, 0x5d, 0xc7, 0x69, 0xcf, 0x1c, 0xb9, 0xcd, - 0xfc, 0xe6, 0xb3, 0x67, 0xf4, 0x7d, 0xb2, 0xa1, 0x3f, 0x90, 0x66, 0x98, 0x9f, 0xf7, 0x04, 0x25, - 0x07, 0x82, 0x94, 0x89, 0xa4, 0xc2, 0x2c, 0xbe, 0x5e, 0x66, 0xb9, 0x32, 0x32, 0xc1, 0x83, 0xb1, - 0x54, 0xf9, 0xd4, 0x76, 0xc2, 0xcc, 0x52, 0xd4, 0xae, 0xea, 0xa5, 0x19, 0x19, 0x62, 0xdb, 0x57, - 0xf2, 0x9e, 0x93, 0xf5, 0xec, 0x70, 0xe7, 0xf6, 0x80, 0x06, 0xe4, 0x14, 0x07, 0xb6, 0x2a, 0xc4, - 0x9d, 0x6f, 0x1e, 0xd4, 0xc3, 0x5c, 0x89, 0x37, 0xa9, 0x91, 0xa4, 0x34, 0x0b, 0xa0, 0xba, 0x58, - 0x11, 0x78, 0x6d, 0xaf, 0x5b, 0x0b, 0xcb, 0x96, 0xdd, 0x85, 0xc6, 0xa2, 0xe4, 0x19, 0x91, 0x09, - 0x56, 0xdd, 0xb8, 0xbe, 0x60, 0x21, 0x91, 0x61, 0xbb, 0x50, 0x13, 0x99, 0xcc, 0x79, 0x1a, 0x99, - 0x61, 0xb0, 0xe6, 0xe6, 0xbe, 0x05, 0xa7, 0x91, 0x19, 0xb2, 0x07, 0xb0, 0xa9, 0x67, 0xda, 0x60, - 0x12, 0x73, 0x31, 0xc8, 0x28, 0x4f, 0x83, 0xf5, 0xb6, 0xd7, 0xf5, 0xc3, 0xe6, 0x82, 0xf6, 0x1d, - 0xec, 0xfc, 0x58, 0x83, 0x66, 0x3f, 0xc3, 0xc8, 0x60, 0x79, 0x52, 0x07, 0x9a, 0x8a, 0x78, 0x2a, - 0xbf, 0x90, 0x29, 0x36, 0x7b, 0xee, 0xb9, 0xba, 0xa2, 0x53, 0xcb, 0xdc, 0xe6, 0x3b, 0xe0, 0x53, - 0x8a, 0x8a, 0x1b, 0x91, 0xba, 0xc3, 0xfc, 0xb0, 0x6a, 0xfb, 0x77, 0x22, 0x65, 0x8f, 0x61, 0x1b, - 0xa7, 0x06, 0x33, 0x15, 0x8d, 0x79, 0xae, 0xe4, 0x94, 0x6b, 0x12, 0x23, 0x34, 0xda, 0x1d, 0xe8, - 0x87, 0xb7, 0xca, 0xe1, 0x99, 0x92, 0xd3, 0xb7, 0xc5, 0x88, 0xed, 0x80, 0x6f, 0x30, 0x4b, 0xa4, - 0x8a, 0xc6, 0x8b, 0x2b, 0x97, 0x3d, 0xdb, 0x03, 0xf8, 0x2c, 0xc7, 0xc8, 0xc7, 0x24, 0x46, 0x3a, - 0xd8, 0x70, 0xd3, 0x9a, 0x25, 0xaf, 0x2c, 0x60, 0x8f, 0xe0, 0x06, 0x26, 0xa9, 0x99, 0x71, 0x15, - 0x25, 0xa8, 0xd3, 0x48, 0xa0, 0x0e, 0x2a, 0xed, 0xb5, 0x6e, 0x2d, 0xdc, 0x72, 0xfc, 0x64, 0x89, - 0xad, 0xa3, 0x85, 0x13, 0x9a, 0x27, 0x14, 0x63, 0x50, 0x2d, 0x1c, 0x5d, 0xb0, 0xd7, 0x14, 0x23, - 0xbb, 0x0f, 0x9b, 0x8a, 0xb8, 0xc2, 0x09, 0x1f, 0xe1, 0x2c, 0x93, 0x6a, 
0x10, 0xf8, 0x6e, 0x61, - 0x43, 0xd1, 0x09, 0x4e, 0x5e, 0x16, 0x8c, 0xed, 0x43, 0x5d, 0x0f, 0x65, 0x52, 0xfa, 0x5a, 0x73, - 0xef, 0x01, 0x8b, 0x0a, 0x53, 0xd9, 0x36, 0x54, 0x24, 0xf1, 0x5c, 0xc6, 0x01, 0xb4, 0xbd, 0x6e, - 0x33, 0xdc, 0x90, 0x74, 0x26, 0xe3, 0x05, 0x1e, 0xc8, 0x38, 0xa8, 0x97, 0xf8, 0x85, 0x8c, 0xed, - 0x52, 0x17, 0xe3, 0x84, 0xb2, 0x51, 0x91, 0x65, 0xc3, 0xbd, 0xb1, 0x61, 0xe9, 0x7b, 0xca, 0x46, - 0x2e, 0xcf, 0x87, 0xb0, 0xe5, 0x54, 0x32, 0x89, 0x06, 0x58, 0xc8, 0x9a, 0x4e, 0xd6, 0xb4, 0xf8, - 0xd8, 0x52, 0xab, 0xeb, 0x7c, 0x5f, 0x85, 0x9b, 0xfd, 0x21, 0x8a, 0x51, 0x4a, 0x52, 0x99, 0x32, - 0x54, 0x06, 0xeb, 0x38, 0x95, 0x65, 0x96, 0xae, 0xfe, 0x6f, 0x43, 0xdc, 0x85, 0xda, 0x95, 0x95, - 0x7e, 0xf1, 0x59, 0x4c, 0x4a, 0x1b, 0xf7, 0x00, 0xae, 0x39, 0x58, 0x44, 0x57, 0x93, 0x4b, 0xf7, - 0x9e, 0xc2, 0xe6, 0x69, 0x46, 0x02, 0xb5, 0x3e, 0x42, 0x13, 0xc9, 0xb1, 0x66, 0xf7, 0xa0, 0x8a, - 0x53, 0x14, 0x5c, 0xc6, 0xc5, 0x17, 0x7a, 0x08, 0xf3, 0xdf, 0xfb, 0x95, 0xe7, 0x53, 0x14, 0xc7, - 0x47, 0x61, 0xc5, 0x8e, 0x8e, 0xe3, 0xc3, 0x4f, 0x17, 0x97, 0xad, 0x95, 0x5f, 0x97, 0xad, 0x95, - 0xaf, 0xf3, 0x96, 0x77, 0x31, 0x6f, 0x79, 0x3f, 0xe7, 0x2d, 0xef, 0xcf, 0xbc, 0xe5, 0x7d, 0x3c, - 0xfc, 0xd7, 0x5f, 0xcc, 0xb3, 0x65, 0xf5, 0x61, 0xe5, 0xbc, 0xe2, 0xfe, 0x1e, 0x4f, 0xfe, 0x06, - 0x00, 0x00, 0xff, 0xff, 0x7f, 0x24, 0x6f, 0x2e, 0xb1, 0x04, 0x00, 0x00, +func (x *CreateOptions) GetNoPivotRoot() bool { + if x != nil { + return x.NoPivotRoot + } + return false } -func (m *RuncOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *CreateOptions) GetOpenTcp() bool { + if x != nil { + return x.OpenTcp } - return dAtA[:n], nil + return false } -func (m *RuncOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *CreateOptions) GetExternalUnixSockets() bool { + if x != nil { + return x.ExternalUnixSockets + } + return false } -func (m *RuncOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.SystemdCgroup { - i-- - if m.SystemdCgroup { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 +func (x *CreateOptions) GetTerminal() bool { + if x != nil { + return x.Terminal } - if len(m.CriuPath) > 0 { - i -= len(m.CriuPath) - copy(dAtA[i:], m.CriuPath) - i = encodeVarintRunc(dAtA, i, uint64(len(m.CriuPath))) - i-- - dAtA[i] = 0x1a - } - if len(m.RuntimeRoot) > 0 { - i -= len(m.RuntimeRoot) - copy(dAtA[i:], m.RuntimeRoot) - i = encodeVarintRunc(dAtA, i, uint64(len(m.RuntimeRoot))) - i-- - dAtA[i] = 0x12 - } - if len(m.Runtime) > 0 { - i -= len(m.Runtime) - copy(dAtA[i:], m.Runtime) - i = encodeVarintRunc(dAtA, i, uint64(len(m.Runtime))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return false } -func (m *CreateOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *CreateOptions) GetFileLocks() bool { + if x != nil { + return x.FileLocks } - return dAtA[:n], nil + return false } -func (m *CreateOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *CreateOptions) GetEmptyNamespaces() []string { + if x != nil { + return x.EmptyNamespaces 
+ } + return nil } -func (m *CreateOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) +func (x *CreateOptions) GetCgroupsMode() string { + if x != nil { + return x.CgroupsMode } - if len(m.CriuImagePath) > 0 { - i -= len(m.CriuImagePath) - copy(dAtA[i:], m.CriuImagePath) - i = encodeVarintRunc(dAtA, i, uint64(len(m.CriuImagePath))) - i-- - dAtA[i] = 0x6a - } - if len(m.CriuWorkPath) > 0 { - i -= len(m.CriuWorkPath) - copy(dAtA[i:], m.CriuWorkPath) - i = encodeVarintRunc(dAtA, i, uint64(len(m.CriuWorkPath))) - i-- - dAtA[i] = 0x62 - } - if m.IoGid != 0 { - i = encodeVarintRunc(dAtA, i, uint64(m.IoGid)) - i-- - dAtA[i] = 0x58 - } - if m.IoUid != 0 { - i = encodeVarintRunc(dAtA, i, uint64(m.IoUid)) - i-- - dAtA[i] = 0x50 - } - if len(m.ShimCgroup) > 0 { - i -= len(m.ShimCgroup) - copy(dAtA[i:], m.ShimCgroup) - i = encodeVarintRunc(dAtA, i, uint64(len(m.ShimCgroup))) - i-- - dAtA[i] = 0x4a - } - if m.NoNewKeyring { - i-- - if m.NoNewKeyring { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x40 - } - if len(m.CgroupsMode) > 0 { - i -= len(m.CgroupsMode) - copy(dAtA[i:], m.CgroupsMode) - i = encodeVarintRunc(dAtA, i, uint64(len(m.CgroupsMode))) - i-- - dAtA[i] = 0x3a - } - if len(m.EmptyNamespaces) > 0 { - for iNdEx := len(m.EmptyNamespaces) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.EmptyNamespaces[iNdEx]) - copy(dAtA[i:], m.EmptyNamespaces[iNdEx]) - i = encodeVarintRunc(dAtA, i, uint64(len(m.EmptyNamespaces[iNdEx]))) - i-- - dAtA[i] = 0x32 - } - } - if m.FileLocks { - i-- - if m.FileLocks { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if m.Terminal { - i-- - if m.Terminal { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if m.ExternalUnixSockets { - i-- - if m.ExternalUnixSockets { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if m.OpenTcp { - i-- - if m.OpenTcp { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if m.NoPivotRoot { - i-- - if m.NoPivotRoot { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil + return "" } -func (m *CheckpointOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *CreateOptions) GetNoNewKeyring() bool { + if x != nil { + return x.NoNewKeyring } - return dAtA[:n], nil + return false } -func (m *CheckpointOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *CreateOptions) GetShimCgroup() string { + if x != nil { + return x.ShimCgroup + } + return "" } -func (m *CheckpointOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ImagePath) > 0 { - i -= len(m.ImagePath) - copy(dAtA[i:], m.ImagePath) - i = encodeVarintRunc(dAtA, i, uint64(len(m.ImagePath))) - i-- - dAtA[i] = 0x4a - } - if len(m.WorkPath) > 0 { - i -= len(m.WorkPath) - copy(dAtA[i:], m.WorkPath) - i = encodeVarintRunc(dAtA, i, uint64(len(m.WorkPath))) - i-- - dAtA[i] = 0x42 - } - if len(m.CgroupsMode) > 0 { - i -= len(m.CgroupsMode) - copy(dAtA[i:], m.CgroupsMode) - i = encodeVarintRunc(dAtA, i, uint64(len(m.CgroupsMode))) - i-- - dAtA[i] = 
0x3a - } - if len(m.EmptyNamespaces) > 0 { - for iNdEx := len(m.EmptyNamespaces) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.EmptyNamespaces[iNdEx]) - copy(dAtA[i:], m.EmptyNamespaces[iNdEx]) - i = encodeVarintRunc(dAtA, i, uint64(len(m.EmptyNamespaces[iNdEx]))) - i-- - dAtA[i] = 0x32 - } - } - if m.FileLocks { - i-- - if m.FileLocks { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if m.Terminal { - i-- - if m.Terminal { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if m.ExternalUnixSockets { - i-- - if m.ExternalUnixSockets { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if m.OpenTcp { - i-- - if m.OpenTcp { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 +func (x *CreateOptions) GetIoUid() uint32 { + if x != nil { + return x.IoUid } - if m.Exit { - i-- - if m.Exit { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil + return 0 } -func (m *ProcessDetails) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *CreateOptions) GetIoGid() uint32 { + if x != nil { + return x.IoGid } - return dAtA[:n], nil + return 0 } -func (m *ProcessDetails) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *CreateOptions) GetCriuWorkPath() string { + if x != nil { + return x.CriuWorkPath + } + return "" } -func (m *ProcessDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) +func (x *CreateOptions) GetCriuImagePath() string { + if x != nil { + return x.CriuImagePath } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintRunc(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return "" } -func encodeVarintRunc(dAtA []byte, offset int, v uint64) int { - offset -= sovRunc(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base +type CheckpointOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Exit bool `protobuf:"varint,1,opt,name=exit,proto3" json:"exit,omitempty"` + OpenTcp bool `protobuf:"varint,2,opt,name=open_tcp,json=openTcp,proto3" json:"open_tcp,omitempty"` + ExternalUnixSockets bool `protobuf:"varint,3,opt,name=external_unix_sockets,json=externalUnixSockets,proto3" json:"external_unix_sockets,omitempty"` + Terminal bool `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"` + FileLocks bool `protobuf:"varint,5,opt,name=file_locks,json=fileLocks,proto3" json:"file_locks,omitempty"` + EmptyNamespaces []string `protobuf:"bytes,6,rep,name=empty_namespaces,json=emptyNamespaces,proto3" json:"empty_namespaces,omitempty"` + CgroupsMode string `protobuf:"bytes,7,opt,name=cgroups_mode,json=cgroupsMode,proto3" json:"cgroups_mode,omitempty"` + WorkPath string `protobuf:"bytes,8,opt,name=work_path,json=workPath,proto3" json:"work_path,omitempty"` + ImagePath string `protobuf:"bytes,9,opt,name=image_path,json=imagePath,proto3" json:"image_path,omitempty"` } -func (m *RuncOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Runtime) - if l > 0 { - n += 1 + l + 
sovRunc(uint64(l)) - } - l = len(m.RuntimeRoot) - if l > 0 { - n += 1 + l + sovRunc(uint64(l)) - } - l = len(m.CriuPath) - if l > 0 { - n += 1 + l + sovRunc(uint64(l)) - } - if m.SystemdCgroup { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) + +func (x *CheckpointOptions) Reset() { + *x = CheckpointOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return n } -func (m *CreateOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.NoPivotRoot { - n += 2 - } - if m.OpenTcp { - n += 2 - } - if m.ExternalUnixSockets { - n += 2 - } - if m.Terminal { - n += 2 - } - if m.FileLocks { - n += 2 - } - if len(m.EmptyNamespaces) > 0 { - for _, s := range m.EmptyNamespaces { - l = len(s) - n += 1 + l + sovRunc(uint64(l)) - } - } - l = len(m.CgroupsMode) - if l > 0 { - n += 1 + l + sovRunc(uint64(l)) - } - if m.NoNewKeyring { - n += 2 - } - l = len(m.ShimCgroup) - if l > 0 { - n += 1 + l + sovRunc(uint64(l)) - } - if m.IoUid != 0 { - n += 1 + sovRunc(uint64(m.IoUid)) - } - if m.IoGid != 0 { - n += 1 + sovRunc(uint64(m.IoGid)) - } - l = len(m.CriuWorkPath) - if l > 0 { - n += 1 + l + sovRunc(uint64(l)) - } - l = len(m.CriuImagePath) - if l > 0 { - n += 1 + l + sovRunc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n +func (x *CheckpointOptions) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *CheckpointOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Exit { - n += 2 - } - if m.OpenTcp { - n += 2 - } - if m.ExternalUnixSockets { - n += 2 - } - if m.Terminal { - n += 2 - } - if m.FileLocks { - n += 2 - } - if len(m.EmptyNamespaces) > 0 { - for _, s := range m.EmptyNamespaces { - l = len(s) - n += 1 + l + sovRunc(uint64(l)) +func (*CheckpointOptions) ProtoMessage() {} + +func (x *CheckpointOptions) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } - l = len(m.CgroupsMode) - if l > 0 { - n += 1 + l + sovRunc(uint64(l)) - } - l = len(m.WorkPath) - if l > 0 { - n += 1 + l + sovRunc(uint64(l)) - } - l = len(m.ImagePath) - if l > 0 { - n += 1 + l + sovRunc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n + return mi.MessageOf(x) } -func (m *ProcessDetails) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovRunc(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n +// Deprecated: Use CheckpointOptions.ProtoReflect.Descriptor instead. 
+func (*CheckpointOptions) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDescGZIP(), []int{2} } -func sovRunc(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozRunc(x uint64) (n int) { - return sovRunc(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *RuncOptions) String() string { - if this == nil { - return "nil" +func (x *CheckpointOptions) GetExit() bool { + if x != nil { + return x.Exit } - s := strings.Join([]string{`&RuncOptions{`, - `Runtime:` + fmt.Sprintf("%v", this.Runtime) + `,`, - `RuntimeRoot:` + fmt.Sprintf("%v", this.RuntimeRoot) + `,`, - `CriuPath:` + fmt.Sprintf("%v", this.CriuPath) + `,`, - `SystemdCgroup:` + fmt.Sprintf("%v", this.SystemdCgroup) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s + return false } -func (this *CreateOptions) String() string { - if this == nil { - return "nil" + +func (x *CheckpointOptions) GetOpenTcp() bool { + if x != nil { + return x.OpenTcp } - s := strings.Join([]string{`&CreateOptions{`, - `NoPivotRoot:` + fmt.Sprintf("%v", this.NoPivotRoot) + `,`, - `OpenTcp:` + fmt.Sprintf("%v", this.OpenTcp) + `,`, - `ExternalUnixSockets:` + fmt.Sprintf("%v", this.ExternalUnixSockets) + `,`, - `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, - `FileLocks:` + fmt.Sprintf("%v", this.FileLocks) + `,`, - `EmptyNamespaces:` + fmt.Sprintf("%v", this.EmptyNamespaces) + `,`, - `CgroupsMode:` + fmt.Sprintf("%v", this.CgroupsMode) + `,`, - `NoNewKeyring:` + fmt.Sprintf("%v", this.NoNewKeyring) + `,`, - `ShimCgroup:` + fmt.Sprintf("%v", this.ShimCgroup) + `,`, - `IoUid:` + fmt.Sprintf("%v", this.IoUid) + `,`, - `IoGid:` + fmt.Sprintf("%v", this.IoGid) + `,`, - `CriuWorkPath:` + fmt.Sprintf("%v", this.CriuWorkPath) + `,`, - `CriuImagePath:` + fmt.Sprintf("%v", this.CriuImagePath) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s + return false } -func (this *CheckpointOptions) String() string { - if this == nil { - return "nil" + +func (x *CheckpointOptions) GetExternalUnixSockets() bool { + if x != nil { + return x.ExternalUnixSockets } - s := strings.Join([]string{`&CheckpointOptions{`, - `Exit:` + fmt.Sprintf("%v", this.Exit) + `,`, - `OpenTcp:` + fmt.Sprintf("%v", this.OpenTcp) + `,`, - `ExternalUnixSockets:` + fmt.Sprintf("%v", this.ExternalUnixSockets) + `,`, - `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, - `FileLocks:` + fmt.Sprintf("%v", this.FileLocks) + `,`, - `EmptyNamespaces:` + fmt.Sprintf("%v", this.EmptyNamespaces) + `,`, - `CgroupsMode:` + fmt.Sprintf("%v", this.CgroupsMode) + `,`, - `WorkPath:` + fmt.Sprintf("%v", this.WorkPath) + `,`, - `ImagePath:` + fmt.Sprintf("%v", this.ImagePath) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s + return false } -func (this *ProcessDetails) String() string { - if this == nil { - return "nil" + +func (x *CheckpointOptions) GetTerminal() bool { + if x != nil { + return x.Terminal } - s := strings.Join([]string{`&ProcessDetails{`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s + return false } -func valueToStringRunc(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + +func (x *CheckpointOptions) GetFileLocks() bool { + if x != nil { + return x.FileLocks } - pv := 
reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return false } -func (m *RuncOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RuncOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RuncOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Runtime", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Runtime = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimeRoot", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RuntimeRoot = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CriuPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CriuPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SystemdCgroup", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.SystemdCgroup = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipRunc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRunc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *CheckpointOptions) GetEmptyNamespaces() []string { + if x != nil { + return x.EmptyNamespaces } return nil } -func (m *CreateOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CreateOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CreateOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NoPivotRoot", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.NoPivotRoot = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OpenTcp", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.OpenTcp = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalUnixSockets", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ExternalUnixSockets = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Terminal = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FileLocks", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.FileLocks = bool(v != 0) - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EmptyNamespaces", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EmptyNamespaces = append(m.EmptyNamespaces, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 7: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CgroupsMode", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CgroupsMode = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NoNewKeyring", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.NoNewKeyring = bool(v != 0) - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShimCgroup", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ShimCgroup = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IoUid", wireType) - } - m.IoUid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.IoUid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IoGid", wireType) - } - m.IoGid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.IoGid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CriuWorkPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CriuWorkPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CriuImagePath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - 
intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CriuImagePath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRunc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRunc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *CheckpointOptions) GetCgroupsMode() string { + if x != nil { + return x.CgroupsMode } - return nil + return "" } -func (m *CheckpointOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CheckpointOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CheckpointOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Exit", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Exit = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OpenTcp", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.OpenTcp = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalUnixSockets", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ExternalUnixSockets = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Terminal = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FileLocks", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.FileLocks = bool(v != 0) - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EmptyNamespaces", 
wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EmptyNamespaces = append(m.EmptyNamespaces, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CgroupsMode", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CgroupsMode = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.WorkPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImagePath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ImagePath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRunc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRunc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF +func (x *CheckpointOptions) GetWorkPath() string { + if x != nil { + return x.WorkPath } - return nil + return "" } -func (m *ProcessDetails) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProcessDetails: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProcessDetails: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRunc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthRunc - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthRunc - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRunc(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthRunc - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } + +func (x *CheckpointOptions) GetImagePath() string { + if x != nil { + return x.ImagePath } + return "" +} + +type ProcessDetails struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields - if iNdEx > l { - return io.ErrUnexpectedEOF + ExecID string `protobuf:"bytes,1,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` +} + +func (x *ProcessDetails) Reset() { + *x = ProcessDetails{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return nil } -func skipRunc(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRunc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRunc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRunc - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthRunc - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupRunc - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthRunc - } - if depth == 0 { - return iNdEx, nil + +func (x *ProcessDetails) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProcessDetails) ProtoMessage() {} + +func (x *ProcessDetails) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } - return 0, io.ErrUnexpectedEOF + return mi.MessageOf(x) +} + +// Deprecated: Use ProcessDetails.ProtoReflect.Descriptor instead. 
+func (*ProcessDetails) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDescGZIP(), []int{3} +} + +func (x *ProcessDetails) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} + +var File_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDesc = []byte{ + 0x0a, 0x43, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2f, 0x6c, 0x69, 0x6e, 0x75, 0x78, + 0x2f, 0x72, 0x75, 0x6e, 0x63, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x72, 0x75, 0x6e, 0x63, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x6c, 0x69, 0x6e, 0x75, 0x78, 0x2e, 0x72, 0x75, 0x6e, 0x63, 0x22, 0x92, 0x01, 0x0a, + 0x0b, 0x52, 0x75, 0x6e, 0x63, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x18, 0x0a, 0x07, + 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x72, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, + 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x1f, 0x0a, 0x09, 0x63, 0x72, 0x69, + 0x75, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, + 0x52, 0x08, 0x63, 0x72, 0x69, 0x75, 0x50, 0x61, 0x74, 0x68, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x79, + 0x73, 0x74, 0x65, 0x6d, 0x64, 0x5f, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0d, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x64, 0x43, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x22, 0xce, 0x03, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e, 0x6f, 0x5f, 0x70, 0x69, 0x76, 0x6f, 0x74, 0x5f, + 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6e, 0x6f, 0x50, 0x69, + 0x76, 0x6f, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x70, 0x65, 0x6e, 0x5f, + 0x74, 0x63, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x6f, 0x70, 0x65, 0x6e, 0x54, + 0x63, 0x70, 0x12, 0x32, 0x0a, 0x15, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x75, + 0x6e, 0x69, 0x78, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x13, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x55, 0x6e, 0x69, 0x78, 0x53, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, + 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, + 0x61, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x73, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x6b, + 0x73, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x65, 0x6d, 0x70, + 0x74, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, + 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x63, 0x67, 0x72, 
0x6f, 0x75, 0x70, 0x73, 0x4d, 0x6f, 0x64, 0x65, 0x12, + 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x72, 0x69, 0x6e, + 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x6e, 0x6f, 0x4e, 0x65, 0x77, 0x4b, 0x65, + 0x79, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x68, 0x69, 0x6d, 0x5f, 0x63, 0x67, + 0x72, 0x6f, 0x75, 0x70, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x68, 0x69, 0x6d, + 0x43, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x15, 0x0a, 0x06, 0x69, 0x6f, 0x5f, 0x75, 0x69, 0x64, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x69, 0x6f, 0x55, 0x69, 0x64, 0x12, 0x15, 0x0a, + 0x06, 0x69, 0x6f, 0x5f, 0x67, 0x69, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x69, + 0x6f, 0x47, 0x69, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x77, 0x6f, 0x72, + 0x6b, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x72, + 0x69, 0x75, 0x57, 0x6f, 0x72, 0x6b, 0x50, 0x61, 0x74, 0x68, 0x12, 0x26, 0x0a, 0x0f, 0x63, 0x72, + 0x69, 0x75, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x0d, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x72, 0x69, 0x75, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x50, 0x61, + 0x74, 0x68, 0x22, 0xbb, 0x02, 0x0a, 0x11, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x78, 0x69, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x65, 0x78, 0x69, 0x74, 0x12, 0x19, 0x0a, 0x08, + 0x6f, 0x70, 0x65, 0x6e, 0x5f, 0x74, 0x63, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, + 0x6f, 0x70, 0x65, 0x6e, 0x54, 0x63, 0x70, 0x12, 0x32, 0x0a, 0x15, 0x65, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x55, 0x6e, 0x69, 0x78, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x74, + 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x74, + 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x5f, + 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x66, 0x69, 0x6c, + 0x65, 0x4c, 0x6f, 0x63, 0x6b, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x5f, 0x6d, 0x6f, 0x64, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, + 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x70, 0x61, 0x74, + 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, 0x72, 0x6b, 0x50, 0x61, 0x74, + 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x50, 0x61, 0x74, 0x68, + 0x22, 0x29, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, + 0x6c, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, 0x64, 0x42, 0x44, 0x5a, 0x42, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 
0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, + 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2f, 0x6c, 0x69, 0x6e, 0x75, 0x78, 0x2f, 0x72, 0x75, + 0x6e, 0x63, 0x74, 0x79, 0x70, 0x65, 0x73, 0x3b, 0x72, 0x75, 0x6e, 0x63, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( - ErrInvalidLengthRunc = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowRunc = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupRunc = fmt.Errorf("proto: unexpected end of group") + file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDescData = file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDesc ) + +func file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDescData) + }) + return file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDescData +} + +var file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_goTypes = []interface{}{ + (*RuncOptions)(nil), // 0: containerd.linux.runc.RuncOptions + (*CreateOptions)(nil), // 1: containerd.linux.runc.CreateOptions + (*CheckpointOptions)(nil), // 2: containerd.linux.runc.CheckpointOptions + (*ProcessDetails)(nil), // 3: containerd.linux.runc.ProcessDetails +} +var file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_init() } +func file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_init() { + if File_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RuncOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckpointOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProcessDetails); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto = out.File + file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_rawDesc = nil + file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_goTypes = nil + file_github_com_containerd_containerd_runtime_linux_runctypes_runc_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/containerd/runtime/linux/runctypes/runc.proto b/vendor/github.com/containerd/containerd/runtime/linux/runctypes/runc.proto index 78e3abf4cb776..5e2d675fbf8da 100644 --- a/vendor/github.com/containerd/containerd/runtime/linux/runctypes/runc.proto +++ b/vendor/github.com/containerd/containerd/runtime/linux/runctypes/runc.proto @@ -2,14 +2,18 @@ syntax = "proto3"; package containerd.linux.runc; -import weak "gogoproto/gogo.proto"; - option go_package = "github.com/containerd/containerd/runtime/linux/runctypes;runctypes"; message RuncOptions { string runtime = 1; string runtime_root = 2; - string criu_path = 3; + // criu binary path. + // + // Deprecated: runc option --criu is now ignored (with a warning), and the + // option will be removed entirely in a future release. Users who need a non- + // standard criu binary should rely on the standard way of looking up binaries + // in $PATH. 
+ string criu_path = 3 [deprecated = true]; bool systemd_cgroup = 4; } diff --git a/vendor/github.com/containerd/containerd/runtime/v2/runc/options/next.pb.txt b/vendor/github.com/containerd/containerd/runtime/v2/runc/options/next.pb.txt index 7a29ff31c6e9f..76478c5f6763d 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/runc/options/next.pb.txt +++ b/vendor/github.com/containerd/containerd/runtime/v2/runc/options/next.pb.txt @@ -1,7 +1,6 @@ file { name: "github.com/containerd/containerd/runtime/v2/runc/options/oci.proto" package: "containerd.runc.v1" - dependency: "gogoproto/gogo.proto" message_type { name: "Options" field { @@ -58,6 +57,9 @@ file { number: 8 label: LABEL_OPTIONAL type: TYPE_STRING + options { + deprecated: true + } json_name: "criuPath" } field { @@ -161,6 +163,5 @@ file { options { go_package: "github.com/containerd/containerd/runtime/v2/runc/options;options" } - weak_dependency: 0 syntax: "proto3" } diff --git a/vendor/github.com/containerd/containerd/runtime/v2/runc/options/oci.pb.go b/vendor/github.com/containerd/containerd/runtime/v2/runc/options/oci.pb.go index c9c44742a2588..2d90df35cd996 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/runc/options/oci.pb.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/runc/options/oci.pb.go @@ -1,30 +1,30 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 // source: github.com/containerd/containerd/runtime/v2/runc/options/oci.proto package options import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" - strings "strings" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type Options struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // disable pivot root when creating a container NoPivotRoot bool `protobuf:"varint,1,opt,name=no_pivot_root,json=noPivotRoot,proto3" json:"no_pivot_root,omitempty"` // create a new keyring for the container @@ -39,52 +39,138 @@ type Options struct { BinaryName string `protobuf:"bytes,6,opt,name=binary_name,json=binaryName,proto3" json:"binary_name,omitempty"` // runc root directory Root string `protobuf:"bytes,7,opt,name=root,proto3" json:"root,omitempty"` - // criu binary path + // criu binary path. + // + // Deprecated: runc option --criu is now ignored (with a warning), and the + // option will be removed entirely in a future release. Users who need a non- + // standard criu binary should rely on the standard way of looking up binaries + // in $PATH. + // + // Deprecated: Do not use. 
CriuPath string `protobuf:"bytes,8,opt,name=criu_path,json=criuPath,proto3" json:"criu_path,omitempty"` // enable systemd cgroups SystemdCgroup bool `protobuf:"varint,9,opt,name=systemd_cgroup,json=systemdCgroup,proto3" json:"systemd_cgroup,omitempty"` // criu image path CriuImagePath string `protobuf:"bytes,10,opt,name=criu_image_path,json=criuImagePath,proto3" json:"criu_image_path,omitempty"` // criu work path - CriuWorkPath string `protobuf:"bytes,11,opt,name=criu_work_path,json=criuWorkPath,proto3" json:"criu_work_path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + CriuWorkPath string `protobuf:"bytes,11,opt,name=criu_work_path,json=criuWorkPath,proto3" json:"criu_work_path,omitempty"` +} + +func (x *Options) Reset() { + *x = Options{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Options) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *Options) Reset() { *m = Options{} } func (*Options) ProtoMessage() {} + +func (x *Options) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Options.ProtoReflect.Descriptor instead. func (*Options) Descriptor() ([]byte, []int) { - return fileDescriptor_4e5440d739e9a863, []int{0} + return file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_rawDescGZIP(), []int{0} } -func (m *Options) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + +func (x *Options) GetNoPivotRoot() bool { + if x != nil { + return x.NoPivotRoot + } + return false } -func (m *Options) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Options.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil + +func (x *Options) GetNoNewKeyring() bool { + if x != nil { + return x.NoNewKeyring } + return false } -func (m *Options) XXX_Merge(src proto.Message) { - xxx_messageInfo_Options.Merge(m, src) + +func (x *Options) GetShimCgroup() string { + if x != nil { + return x.ShimCgroup + } + return "" +} + +func (x *Options) GetIoUid() uint32 { + if x != nil { + return x.IoUid + } + return 0 +} + +func (x *Options) GetIoGid() uint32 { + if x != nil { + return x.IoGid + } + return 0 +} + +func (x *Options) GetBinaryName() string { + if x != nil { + return x.BinaryName + } + return "" +} + +func (x *Options) GetRoot() string { + if x != nil { + return x.Root + } + return "" +} + +// Deprecated: Do not use. 
+func (x *Options) GetCriuPath() string { + if x != nil { + return x.CriuPath + } + return "" } -func (m *Options) XXX_Size() int { - return m.Size() + +func (x *Options) GetSystemdCgroup() bool { + if x != nil { + return x.SystemdCgroup + } + return false } -func (m *Options) XXX_DiscardUnknown() { - xxx_messageInfo_Options.DiscardUnknown(m) + +func (x *Options) GetCriuImagePath() string { + if x != nil { + return x.CriuImagePath + } + return "" } -var xxx_messageInfo_Options proto.InternalMessageInfo +func (x *Options) GetCriuWorkPath() string { + if x != nil { + return x.CriuWorkPath + } + return "" +} type CheckpointOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // exit the container after a checkpoint Exit bool `protobuf:"varint,1,opt,name=exit,proto3" json:"exit,omitempty"` // checkpoint open tcp connections @@ -102,1357 +188,298 @@ type CheckpointOptions struct { // checkpoint image path ImagePath string `protobuf:"bytes,8,opt,name=image_path,json=imagePath,proto3" json:"image_path,omitempty"` // checkpoint work path - WorkPath string `protobuf:"bytes,9,opt,name=work_path,json=workPath,proto3" json:"work_path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + WorkPath string `protobuf:"bytes,9,opt,name=work_path,json=workPath,proto3" json:"work_path,omitempty"` } -func (m *CheckpointOptions) Reset() { *m = CheckpointOptions{} } -func (*CheckpointOptions) ProtoMessage() {} -func (*CheckpointOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_4e5440d739e9a863, []int{1} -} -func (m *CheckpointOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CheckpointOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CheckpointOptions.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil +func (x *CheckpointOptions) Reset() { + *x = CheckpointOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } } -func (m *CheckpointOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_CheckpointOptions.Merge(m, src) -} -func (m *CheckpointOptions) XXX_Size() int { - return m.Size() -} -func (m *CheckpointOptions) XXX_DiscardUnknown() { - xxx_messageInfo_CheckpointOptions.DiscardUnknown(m) -} -var xxx_messageInfo_CheckpointOptions proto.InternalMessageInfo - -type ProcessDetails struct { - // exec process id if the process is managed by a shim - ExecID string `protobuf:"bytes,1,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x *CheckpointOptions) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ProcessDetails) Reset() { *m = ProcessDetails{} } -func (*ProcessDetails) ProtoMessage() {} -func (*ProcessDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_4e5440d739e9a863, []int{2} -} -func (m *ProcessDetails) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProcessDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProcessDetails.Marshal(b, m, deterministic) - 
} else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (*CheckpointOptions) ProtoMessage() {} + +func (x *CheckpointOptions) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } - return b[:n], nil + return ms } + return mi.MessageOf(x) } -func (m *ProcessDetails) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProcessDetails.Merge(m, src) -} -func (m *ProcessDetails) XXX_Size() int { - return m.Size() -} -func (m *ProcessDetails) XXX_DiscardUnknown() { - xxx_messageInfo_ProcessDetails.DiscardUnknown(m) -} - -var xxx_messageInfo_ProcessDetails proto.InternalMessageInfo -func init() { - proto.RegisterType((*Options)(nil), "containerd.runc.v1.Options") - proto.RegisterType((*CheckpointOptions)(nil), "containerd.runc.v1.CheckpointOptions") - proto.RegisterType((*ProcessDetails)(nil), "containerd.runc.v1.ProcessDetails") +// Deprecated: Use CheckpointOptions.ProtoReflect.Descriptor instead. +func (*CheckpointOptions) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_rawDescGZIP(), []int{1} } -func init() { - proto.RegisterFile("github.com/containerd/containerd/runtime/v2/runc/options/oci.proto", fileDescriptor_4e5440d739e9a863) +func (x *CheckpointOptions) GetExit() bool { + if x != nil { + return x.Exit + } + return false } -var fileDescriptor_4e5440d739e9a863 = []byte{ - // 587 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcf, 0x6e, 0xd3, 0x40, - 0x10, 0x87, 0xeb, 0xfe, 0x49, 0xec, 0x4d, 0x93, 0xc2, 0x42, 0x25, 0xd3, 0x8a, 0x34, 0x94, 0x82, - 0xc2, 0x25, 0x11, 0x45, 0x9c, 0xb8, 0xa0, 0xb6, 0x08, 0x55, 0x40, 0xa9, 0x0c, 0x15, 0xa8, 0x97, - 0x95, 0xbb, 0x1e, 0x9c, 0x51, 0xe2, 0x1d, 0xcb, 0xbb, 0x69, 0xd2, 0x1b, 0xef, 0xc5, 0x0b, 0xf4, - 0xc8, 0x91, 0x13, 0xa2, 0xb9, 0xf1, 0x16, 0x68, 0xd7, 0x4e, 0xdb, 0x33, 0x27, 0xcf, 0x7e, 0xf3, - 0xf3, 0x78, 0xfd, 0xad, 0x96, 0xed, 0xa5, 0x68, 0x06, 0xe3, 0xb3, 0x9e, 0xa4, 0xac, 0x2f, 0x49, - 0x99, 0x18, 0x15, 0x14, 0xc9, 0xed, 0xb2, 0x18, 0x2b, 0x83, 0x19, 0xf4, 0xcf, 0x77, 0x6d, 0x29, - 0xfb, 0x94, 0x1b, 0x24, 0xa5, 0xfb, 0x24, 0xb1, 0x97, 0x17, 0x64, 0x88, 0xf3, 0x9b, 0x74, 0xcf, - 0x46, 0x7a, 0xe7, 0xcf, 0x37, 0xee, 0xa7, 0x94, 0x92, 0x6b, 0xf7, 0x6d, 0x55, 0x26, 0xb7, 0xff, - 0x2e, 0xb2, 0xfa, 0xc7, 0xf2, 0x7d, 0xbe, 0xcd, 0x9a, 0x8a, 0x44, 0x8e, 0xe7, 0x64, 0x44, 0x41, - 0x64, 0x42, 0xaf, 0xe3, 0x75, 0xfd, 0xa8, 0xa1, 0xe8, 0xd8, 0xb2, 0x88, 0xc8, 0xf0, 0x1d, 0xd6, - 0x52, 0x24, 0x14, 0x4c, 0xc4, 0x10, 0x2e, 0x0a, 0x54, 0x69, 0xb8, 0xe8, 0x42, 0xab, 0x8a, 0x8e, - 0x60, 0xf2, 0xae, 0x64, 0x7c, 0x8b, 0x35, 0xf4, 0x00, 0x33, 0x21, 0xd3, 0x82, 0xc6, 0x79, 0xb8, - 0xd4, 0xf1, 0xba, 0x41, 0xc4, 0x2c, 0xda, 0x77, 0x84, 0xaf, 0xb3, 0x1a, 0x92, 0x18, 0x63, 0x12, - 0x2e, 0x77, 0xbc, 0x6e, 0x33, 0x5a, 0x41, 0x3a, 0xc1, 0xa4, 0xc2, 0x29, 0x26, 0xe1, 0xca, 0x1c, - 0xbf, 0xc5, 0xc4, 0x8e, 0x3b, 0x43, 0x15, 0x17, 0x17, 0x42, 0xc5, 0x19, 0x84, 0xb5, 0x72, 0x5c, - 0x89, 0x8e, 0xe2, 0x0c, 0x38, 0x67, 0xcb, 0x6e, 0xc3, 0x75, 0xd7, 0x71, 0x35, 0xdf, 0x64, 0x81, - 0x2c, 0x70, 0x2c, 0xf2, 0xd8, 0x0c, 0x42, 0xdf, 0x35, 0x7c, 0x0b, 0x8e, 0x63, 0x33, 0xe0, 0x4f, - 0x58, 0x4b, 0x5f, 0x68, 0x03, 0x59, 0x32, 0xdf, 0x63, 0xe0, 0x7e, 0xa3, 0x59, 0xd1, 0x6a, 0x9b, - 
0x4f, 0xd9, 0x9a, 0x9b, 0x81, 0x59, 0x9c, 0x42, 0x39, 0x89, 0xb9, 0x49, 0x4d, 0x8b, 0x0f, 0x2d, - 0x75, 0xe3, 0x76, 0x58, 0xcb, 0xe5, 0x26, 0x54, 0x0c, 0xcb, 0x58, 0xc3, 0xc5, 0x56, 0x2d, 0xfd, - 0x42, 0xc5, 0xd0, 0xa6, 0xb6, 0x7f, 0x2c, 0xb2, 0xbb, 0xfb, 0x03, 0x90, 0xc3, 0x9c, 0x50, 0x99, - 0xb9, 0x75, 0xce, 0x96, 0x61, 0x8a, 0x73, 0xd9, 0xae, 0xe6, 0x0f, 0x98, 0x4f, 0x39, 0x28, 0x61, - 0x64, 0x5e, 0xf9, 0xad, 0xdb, 0xf5, 0x67, 0x99, 0xf3, 0x5d, 0xb6, 0x0e, 0x53, 0x03, 0x85, 0x8a, - 0x47, 0x62, 0xac, 0x70, 0x2a, 0x34, 0xc9, 0x21, 0x18, 0xed, 0x24, 0xfb, 0xd1, 0xbd, 0x79, 0xf3, - 0x44, 0xe1, 0xf4, 0x53, 0xd9, 0xe2, 0x1b, 0xcc, 0x37, 0x50, 0x64, 0xa8, 0xe2, 0x91, 0xf3, 0xed, - 0x47, 0xd7, 0x6b, 0xfe, 0x90, 0xb1, 0x6f, 0x38, 0x02, 0x31, 0x22, 0x39, 0xd4, 0x4e, 0xbb, 0x1f, - 0x05, 0x96, 0xbc, 0xb7, 0x80, 0x3f, 0x63, 0x77, 0x20, 0xcb, 0x4d, 0x69, 0x5e, 0xe7, 0xb1, 0x04, - 0x1d, 0xd6, 0x3a, 0x4b, 0xdd, 0x20, 0x5a, 0x73, 0xfc, 0xe8, 0x1a, 0xf3, 0x47, 0x6c, 0xb5, 0x74, - 0xa9, 0x45, 0x46, 0x09, 0x54, 0x87, 0xd1, 0xa8, 0xd8, 0x07, 0x4a, 0xc0, 0x7e, 0xec, 0x96, 0xca, - 0xf2, 0x50, 0x02, 0xbc, 0xd6, 0xb8, 0xc9, 0x82, 0x1b, 0x83, 0x41, 0x79, 0x64, 0x93, 0xb9, 0xbd, - 0x97, 0xac, 0x75, 0x5c, 0x90, 0x04, 0xad, 0x0f, 0xc0, 0xc4, 0x38, 0xd2, 0xfc, 0x31, 0xab, 0xc3, - 0x14, 0xa4, 0xc0, 0xc4, 0xc9, 0x0b, 0xf6, 0xd8, 0xec, 0xf7, 0x56, 0xed, 0xcd, 0x14, 0xe4, 0xe1, - 0x41, 0x54, 0xb3, 0xad, 0xc3, 0x64, 0xef, 0xf4, 0xf2, 0xaa, 0xbd, 0xf0, 0xeb, 0xaa, 0xbd, 0xf0, - 0x7d, 0xd6, 0xf6, 0x2e, 0x67, 0x6d, 0xef, 0xe7, 0xac, 0xed, 0xfd, 0x99, 0xb5, 0xbd, 0xd3, 0xd7, - 0xff, 0x7b, 0xd1, 0x5e, 0x55, 0xcf, 0xaf, 0x0b, 0x67, 0x35, 0x77, 0x8b, 0x5e, 0xfc, 0x0b, 0x00, - 0x00, 0xff, 0xff, 0x90, 0x50, 0x79, 0xf2, 0xb5, 0x03, 0x00, 0x00, +func (x *CheckpointOptions) GetOpenTcp() bool { + if x != nil { + return x.OpenTcp + } + return false } -func (m *Options) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *CheckpointOptions) GetExternalUnixSockets() bool { + if x != nil { + return x.ExternalUnixSockets } - return dAtA[:n], nil + return false } -func (m *Options) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *CheckpointOptions) GetTerminal() bool { + if x != nil { + return x.Terminal + } + return false } -func (m *Options) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.CriuWorkPath) > 0 { - i -= len(m.CriuWorkPath) - copy(dAtA[i:], m.CriuWorkPath) - i = encodeVarintOci(dAtA, i, uint64(len(m.CriuWorkPath))) - i-- - dAtA[i] = 0x5a - } - if len(m.CriuImagePath) > 0 { - i -= len(m.CriuImagePath) - copy(dAtA[i:], m.CriuImagePath) - i = encodeVarintOci(dAtA, i, uint64(len(m.CriuImagePath))) - i-- - dAtA[i] = 0x52 +func (x *CheckpointOptions) GetFileLocks() bool { + if x != nil { + return x.FileLocks } - if m.SystemdCgroup { - i-- - if m.SystemdCgroup { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x48 - } - if len(m.CriuPath) > 0 { - i -= len(m.CriuPath) - copy(dAtA[i:], m.CriuPath) - i = encodeVarintOci(dAtA, i, uint64(len(m.CriuPath))) - i-- - dAtA[i] = 0x42 - } - if len(m.Root) > 0 { - i -= len(m.Root) - copy(dAtA[i:], m.Root) - i = encodeVarintOci(dAtA, i, uint64(len(m.Root))) - i-- - dAtA[i] = 0x3a - } - if len(m.BinaryName) > 0 { - i -= len(m.BinaryName) - 
copy(dAtA[i:], m.BinaryName) - i = encodeVarintOci(dAtA, i, uint64(len(m.BinaryName))) - i-- - dAtA[i] = 0x32 - } - if m.IoGid != 0 { - i = encodeVarintOci(dAtA, i, uint64(m.IoGid)) - i-- - dAtA[i] = 0x28 - } - if m.IoUid != 0 { - i = encodeVarintOci(dAtA, i, uint64(m.IoUid)) - i-- - dAtA[i] = 0x20 - } - if len(m.ShimCgroup) > 0 { - i -= len(m.ShimCgroup) - copy(dAtA[i:], m.ShimCgroup) - i = encodeVarintOci(dAtA, i, uint64(len(m.ShimCgroup))) - i-- - dAtA[i] = 0x1a - } - if m.NoNewKeyring { - i-- - if m.NoNewKeyring { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if m.NoPivotRoot { - i-- - if m.NoPivotRoot { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil + return false } -func (m *CheckpointOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *CheckpointOptions) GetEmptyNamespaces() []string { + if x != nil { + return x.EmptyNamespaces } - return dAtA[:n], nil + return nil } -func (m *CheckpointOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (x *CheckpointOptions) GetCgroupsMode() string { + if x != nil { + return x.CgroupsMode + } + return "" } -func (m *CheckpointOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.WorkPath) > 0 { - i -= len(m.WorkPath) - copy(dAtA[i:], m.WorkPath) - i = encodeVarintOci(dAtA, i, uint64(len(m.WorkPath))) - i-- - dAtA[i] = 0x4a - } - if len(m.ImagePath) > 0 { - i -= len(m.ImagePath) - copy(dAtA[i:], m.ImagePath) - i = encodeVarintOci(dAtA, i, uint64(len(m.ImagePath))) - i-- - dAtA[i] = 0x42 - } - if len(m.CgroupsMode) > 0 { - i -= len(m.CgroupsMode) - copy(dAtA[i:], m.CgroupsMode) - i = encodeVarintOci(dAtA, i, uint64(len(m.CgroupsMode))) - i-- - dAtA[i] = 0x3a +func (x *CheckpointOptions) GetImagePath() string { + if x != nil { + return x.ImagePath } - if len(m.EmptyNamespaces) > 0 { - for iNdEx := len(m.EmptyNamespaces) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.EmptyNamespaces[iNdEx]) - copy(dAtA[i:], m.EmptyNamespaces[iNdEx]) - i = encodeVarintOci(dAtA, i, uint64(len(m.EmptyNamespaces[iNdEx]))) - i-- - dAtA[i] = 0x32 - } - } - if m.FileLocks { - i-- - if m.FileLocks { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if m.Terminal { - i-- - if m.Terminal { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if m.ExternalUnixSockets { - i-- - if m.ExternalUnixSockets { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if m.OpenTcp { - i-- - if m.OpenTcp { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if m.Exit { - i-- - if m.Exit { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil + return "" } -func (m *ProcessDetails) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (x *CheckpointOptions) GetWorkPath() string { + if x != nil { + return x.WorkPath } - return dAtA[:n], nil + return "" } -func (m *ProcessDetails) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} +type ProcessDetails struct { + state 
protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func (m *ProcessDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintOci(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + // exec process id if the process is managed by a shim + ExecID string `protobuf:"bytes,1,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` } -func encodeVarintOci(dAtA []byte, offset int, v uint64) int { - offset -= sovOci(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (x *ProcessDetails) Reset() { + *x = ProcessDetails{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - dAtA[offset] = uint8(v) - return base -} -func (m *Options) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.NoPivotRoot { - n += 2 - } - if m.NoNewKeyring { - n += 2 - } - l = len(m.ShimCgroup) - if l > 0 { - n += 1 + l + sovOci(uint64(l)) - } - if m.IoUid != 0 { - n += 1 + sovOci(uint64(m.IoUid)) - } - if m.IoGid != 0 { - n += 1 + sovOci(uint64(m.IoGid)) - } - l = len(m.BinaryName) - if l > 0 { - n += 1 + l + sovOci(uint64(l)) - } - l = len(m.Root) - if l > 0 { - n += 1 + l + sovOci(uint64(l)) - } - l = len(m.CriuPath) - if l > 0 { - n += 1 + l + sovOci(uint64(l)) - } - if m.SystemdCgroup { - n += 2 - } - l = len(m.CriuImagePath) - if l > 0 { - n += 1 + l + sovOci(uint64(l)) - } - l = len(m.CriuWorkPath) - if l > 0 { - n += 1 + l + sovOci(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n } -func (m *CheckpointOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Exit { - n += 2 - } - if m.OpenTcp { - n += 2 - } - if m.ExternalUnixSockets { - n += 2 - } - if m.Terminal { - n += 2 - } - if m.FileLocks { - n += 2 - } - if len(m.EmptyNamespaces) > 0 { - for _, s := range m.EmptyNamespaces { - l = len(s) - n += 1 + l + sovOci(uint64(l)) - } - } - l = len(m.CgroupsMode) - if l > 0 { - n += 1 + l + sovOci(uint64(l)) - } - l = len(m.ImagePath) - if l > 0 { - n += 1 + l + sovOci(uint64(l)) - } - l = len(m.WorkPath) - if l > 0 { - n += 1 + l + sovOci(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n +func (x *ProcessDetails) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ProcessDetails) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovOci(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} +func (*ProcessDetails) ProtoMessage() {} -func sovOci(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozOci(x uint64) (n int) { - return sovOci(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Options) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Options{`, - `NoPivotRoot:` + fmt.Sprintf("%v", this.NoPivotRoot) + `,`, - `NoNewKeyring:` + fmt.Sprintf("%v", this.NoNewKeyring) + `,`, - `ShimCgroup:` + fmt.Sprintf("%v", this.ShimCgroup) + `,`, - `IoUid:` + fmt.Sprintf("%v", 
this.IoUid) + `,`, - `IoGid:` + fmt.Sprintf("%v", this.IoGid) + `,`, - `BinaryName:` + fmt.Sprintf("%v", this.BinaryName) + `,`, - `Root:` + fmt.Sprintf("%v", this.Root) + `,`, - `CriuPath:` + fmt.Sprintf("%v", this.CriuPath) + `,`, - `SystemdCgroup:` + fmt.Sprintf("%v", this.SystemdCgroup) + `,`, - `CriuImagePath:` + fmt.Sprintf("%v", this.CriuImagePath) + `,`, - `CriuWorkPath:` + fmt.Sprintf("%v", this.CriuWorkPath) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CheckpointOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CheckpointOptions{`, - `Exit:` + fmt.Sprintf("%v", this.Exit) + `,`, - `OpenTcp:` + fmt.Sprintf("%v", this.OpenTcp) + `,`, - `ExternalUnixSockets:` + fmt.Sprintf("%v", this.ExternalUnixSockets) + `,`, - `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, - `FileLocks:` + fmt.Sprintf("%v", this.FileLocks) + `,`, - `EmptyNamespaces:` + fmt.Sprintf("%v", this.EmptyNamespaces) + `,`, - `CgroupsMode:` + fmt.Sprintf("%v", this.CgroupsMode) + `,`, - `ImagePath:` + fmt.Sprintf("%v", this.ImagePath) + `,`, - `WorkPath:` + fmt.Sprintf("%v", this.WorkPath) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ProcessDetails) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ProcessDetails{`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringOci(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Options) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Options: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Options: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NoPivotRoot", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.NoPivotRoot = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NoNewKeyring", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.NoNewKeyring = bool(v != 0) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShimCgroup", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOci - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOci - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ShimCgroup = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IoUid", wireType) - } - m.IoUid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.IoUid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IoGid", wireType) - } - m.IoGid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.IoGid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BinaryName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOci - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOci - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.BinaryName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Root", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOci - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOci - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Root = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CriuPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOci - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOci - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CriuPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SystemdCgroup", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.SystemdCgroup = bool(v != 0) - case 10: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field CriuImagePath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOci - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOci - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CriuImagePath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CriuWorkPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOci - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOci - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CriuWorkPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOci(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOci - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy +func (x *ProcessDetails) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) } + return ms } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil + return mi.MessageOf(x) } -func (m *CheckpointOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CheckpointOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CheckpointOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Exit", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Exit = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OpenTcp", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.OpenTcp = bool(v != 0) - case 3: - if 
wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalUnixSockets", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ExternalUnixSockets = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Terminal = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FileLocks", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.FileLocks = bool(v != 0) - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EmptyNamespaces", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOci - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOci - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EmptyNamespaces = append(m.EmptyNamespaces, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CgroupsMode", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOci - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOci - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CgroupsMode = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImagePath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOci - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOci - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ImagePath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen 
|= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOci - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOci - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.WorkPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOci(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOci - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil +// Deprecated: Use ProcessDetails.ProtoReflect.Descriptor instead. +func (*ProcessDetails) Descriptor() ([]byte, []int) { + return file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_rawDescGZIP(), []int{2} +} + +func (x *ProcessDetails) GetExecID() string { + if x != nil { + return x.ExecID + } + return "" +} + +var File_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto protoreflect.FileDescriptor + +var file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_rawDesc = []byte{ + 0x0a, 0x42, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x72, 0x75, + 0x6e, 0x63, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f, 0x63, 0x69, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x72, 0x75, 0x6e, 0x63, 0x2e, 0x76, 0x31, 0x22, 0xed, 0x02, 0x0a, 0x07, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e, 0x6f, 0x5f, 0x70, 0x69, 0x76, 0x6f, 0x74, + 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6e, 0x6f, 0x50, + 0x69, 0x76, 0x6f, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x5f, 0x6e, + 0x65, 0x77, 0x5f, 0x6b, 0x65, 0x79, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0c, 0x6e, 0x6f, 0x4e, 0x65, 0x77, 0x4b, 0x65, 0x79, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x1f, + 0x0a, 0x0b, 0x73, 0x68, 0x69, 0x6d, 0x5f, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x68, 0x69, 0x6d, 0x43, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x12, + 0x15, 0x0a, 0x06, 0x69, 0x6f, 0x5f, 0x75, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x05, 0x69, 0x6f, 0x55, 0x69, 0x64, 0x12, 0x15, 0x0a, 0x06, 0x69, 0x6f, 0x5f, 0x67, 0x69, 0x64, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x69, 0x6f, 0x47, 0x69, 0x64, 0x12, 0x1f, 0x0a, + 0x0b, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, + 0x6f, 0x74, 0x12, 0x1f, 0x0a, 0x09, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x08, 0x63, 0x72, 0x69, 0x75, 0x50, + 0x61, 0x74, 0x68, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x64, 0x5f, 0x63, + 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x79, 
0x73, + 0x74, 0x65, 0x6d, 0x64, 0x43, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x26, 0x0a, 0x0f, 0x63, 0x72, + 0x69, 0x75, 0x5f, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x72, 0x69, 0x75, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x50, 0x61, + 0x74, 0x68, 0x12, 0x24, 0x0a, 0x0e, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x5f, + 0x70, 0x61, 0x74, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x72, 0x69, 0x75, + 0x57, 0x6f, 0x72, 0x6b, 0x50, 0x61, 0x74, 0x68, 0x22, 0xbb, 0x02, 0x0a, 0x11, 0x43, 0x68, 0x65, + 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x12, + 0x0a, 0x04, 0x65, 0x78, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x65, 0x78, + 0x69, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x70, 0x65, 0x6e, 0x5f, 0x74, 0x63, 0x70, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x6f, 0x70, 0x65, 0x6e, 0x54, 0x63, 0x70, 0x12, 0x32, 0x0a, + 0x15, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x73, + 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x65, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x55, 0x6e, 0x69, 0x78, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x12, 0x1d, 0x0a, + 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x4c, 0x6f, 0x63, 0x6b, 0x73, 0x12, 0x29, 0x0a, 0x10, + 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x4e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x73, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, + 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6d, + 0x61, 0x67, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x69, 0x6d, 0x61, 0x67, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1b, 0x0a, 0x09, 0x77, 0x6f, 0x72, + 0x6b, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x77, 0x6f, + 0x72, 0x6b, 0x50, 0x61, 0x74, 0x68, 0x22, 0x29, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, + 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x65, 0x78, 0x65, 0x63, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x78, 0x65, 0x63, 0x49, + 0x64, 0x42, 0x42, 0x5a, 0x40, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2f, 0x76, 0x32, + 0x2f, 0x72, 0x75, 0x6e, 0x63, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func (m *ProcessDetails) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProcessDetails: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProcessDetails: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOci - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOci - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOci - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOci(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOci - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipOci(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowOci - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break +var ( + file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_rawDescOnce sync.Once + file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_rawDescData = file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_rawDesc +) + +func file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_rawDescGZIP() []byte { + file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_rawDescOnce.Do(func() { + file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_rawDescData) + }) + return file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_rawDescData +} + +var file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_goTypes = []interface{}{ + (*Options)(nil), // 0: containerd.runc.v1.Options + (*CheckpointOptions)(nil), // 1: containerd.runc.v1.CheckpointOptions + (*ProcessDetails)(nil), // 2: containerd.runc.v1.ProcessDetails +} +var file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_init() } +func file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_init() { + if 
File_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Options); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowOci - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } + file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CheckpointOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowOci - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthOci - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupOci - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthOci } - if depth == 0 { - return iNdEx, nil + file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ProcessDetails); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } } } - return 0, io.ErrUnexpectedEOF + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_goTypes, + DependencyIndexes: file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_depIdxs, + MessageInfos: file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_msgTypes, + }.Build() + File_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto = out.File + file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_rawDesc = nil + file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_goTypes = nil + file_github_com_containerd_containerd_runtime_v2_runc_options_oci_proto_depIdxs = nil } - -var ( - ErrInvalidLengthOci = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowOci = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupOci = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/containerd/containerd/runtime/v2/runc/options/oci.proto b/vendor/github.com/containerd/containerd/runtime/v2/runc/options/oci.proto index 6b4bcf462c4cd..6270e99913072 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/runc/options/oci.proto +++ b/vendor/github.com/containerd/containerd/runtime/v2/runc/options/oci.proto @@ -2,8 +2,6 @@ syntax = 
"proto3"; package containerd.runc.v1; -import weak "gogoproto/gogo.proto"; - option go_package = "github.com/containerd/containerd/runtime/v2/runc/options;options"; message Options { @@ -21,8 +19,13 @@ message Options { string binary_name = 6; // runc root directory string root = 7; - // criu binary path - string criu_path = 8; + // criu binary path. + // + // Deprecated: runc option --criu is now ignored (with a warning), and the + // option will be removed entirely in a future release. Users who need a non- + // standard criu binary should rely on the standard way of looking up binaries + // in $PATH. + string criu_path = 8 [deprecated = true]; // enable systemd cgroups bool systemd_cgroup = 9; // criu image path diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/publisher.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/publisher.go index ed1ebdd58b9c7..5aa78a5d1b3d4 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/publisher.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/publisher.go @@ -25,8 +25,8 @@ import ( "github.com/containerd/containerd/events" "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/pkg/ttrpcutil" + "github.com/containerd/containerd/protobuf" "github.com/containerd/ttrpc" - "github.com/containerd/typeurl" "github.com/sirupsen/logrus" ) @@ -110,13 +110,13 @@ func (l *RemoteEventsPublisher) Publish(ctx context.Context, topic string, event if err != nil { return err } - any, err := typeurl.MarshalAny(event) + any, err := protobuf.MarshalAnyToProto(event) if err != nil { return err } i := &item{ ev: &v1.Envelope{ - Timestamp: time.Now(), + Timestamp: protobuf.ToTimestamp(time.Now()), Namespace: ns, Topic: topic, Event: any, diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go index 4ad964e2c4896..cf006d80545f9 100644 --- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go +++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim.go @@ -22,21 +22,23 @@ import ( "flag" "fmt" "io" + "net" "os" + "path/filepath" "runtime" "runtime/debug" - "strings" "time" + shimapi "github.com/containerd/containerd/api/runtime/task/v2" "github.com/containerd/containerd/events" "github.com/containerd/containerd/log" "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/pkg/shutdown" "github.com/containerd/containerd/plugin" - shimapi "github.com/containerd/containerd/runtime/v2/task" + "github.com/containerd/containerd/protobuf" + "github.com/containerd/containerd/protobuf/proto" "github.com/containerd/containerd/version" "github.com/containerd/ttrpc" - "github.com/gogo/protobuf/proto" "github.com/sirupsen/logrus" ) @@ -49,9 +51,20 @@ type Publisher interface { // StartOpts describes shim start configuration received from containerd type StartOpts struct { ID string // TODO(2.0): Remove ID, passed directly to start for call symmetry - ContainerdBinary string + ContainerdBinary string // TODO(2.0): Remove ContainerdBinary, use the TTRPC_ADDRESS env to forward events Address string TTRPCAddress string + Debug bool +} + +// BootstrapParams is a JSON payload returned in stdout from shim.Start call. +type BootstrapParams struct { + // Version is the version of shim parameters (expected 2 for shim v2) + Version int `json:"version"` + // Address is a address containerd should use to connect to shim. 
+ Address string `json:"address"` + // Protocol is either TTRPC or GRPC. + Protocol string `json:"protocol"` } type StopStatus struct { @@ -134,6 +147,9 @@ var ( const ( ttrpcAddressEnv = "TTRPC_ADDRESS" + grpcAddressEnv = "GRPC_ADDRESS" + namespaceEnv = "NAMESPACE" + maxVersionEnv = "MAX_SHIM_VERSION" ) func parseFlags() { @@ -145,7 +161,9 @@ func parseFlags() { flag.StringVar(&bundlePath, "bundle", "", "path to the bundle if not workdir") flag.StringVar(&addressFlag, "address", "", "grpc address back to main containerd") - flag.StringVar(&containerdBinaryFlag, "publish-binary", "containerd", "path to publish binary (used for publishing events)") + flag.StringVar(&containerdBinaryFlag, "publish-binary", "", + fmt.Sprintf("path to publish binary (used for publishing events), but %s will ignore this flag, please use the %s env", os.Args[0], ttrpcAddressEnv), + ) flag.Parse() action = flag.Arg(0) @@ -175,7 +193,7 @@ func setLogger(ctx context.Context, id string) (context.Context, error) { l.Logger.SetLevel(logrus.DebugLevel) } f, err := openLog(ctx, id) - if err != nil { //nolint:staticcheck // Ignore SA4023 as some platforms always return error + if err != nil { return ctx, err } l.Logger.SetOutput(f) @@ -223,11 +241,11 @@ func (stm shimToManager) Stop(ctx context.Context, id string) (StopStatus, error return StopStatus{ Pid: int(dr.Pid), ExitStatus: int(dr.ExitStatus), - ExitedAt: dr.ExitedAt, + ExitedAt: protobuf.FromTimestamp(dr.ExitedAt), }, nil } -// RunManager initialzes and runs a shim server +// RunManager initializes and runs a shim server. // TODO(2.0): Rename to Run func RunManager(ctx context.Context, manager Manager, opts ...BinaryOpts) { var config Config @@ -246,7 +264,7 @@ func RunManager(ctx context.Context, manager Manager, opts ...BinaryOpts) { func run(ctx context.Context, manager Manager, initFunc Init, name string, config Config) error { parseFlags() if versionFlag { - fmt.Printf("%s:\n", os.Args[0]) + fmt.Printf("%s:\n", filepath.Base(os.Args[0])) fmt.Println(" Version: ", version.Version) fmt.Println(" Revision:", version.Revision) fmt.Println(" Go version:", version.GoVersion) @@ -261,12 +279,12 @@ func run(ctx context.Context, manager Manager, initFunc Init, name string, confi setRuntime() signals, err := setupSignals(config) - if err != nil { //nolint:staticcheck // Ignore SA4023 as some platforms always return error + if err != nil { return err } if !config.NoSubreaper { - if err := subreaper(); err != nil { //nolint:staticcheck // Ignore SA4023 as some platforms always return error + if err := subreaper(); err != nil { return err } } @@ -307,7 +325,10 @@ func run(ctx context.Context, manager Manager, initFunc Init, name string, confi // Handle explicit actions switch action { case "delete": - logger := log.G(ctx).WithFields(logrus.Fields{ + if debugFlag { + logrus.SetLevel(logrus.DebugLevel) + } + logger := log.G(ctx).WithFields(log.Fields{ "pid": os.Getpid(), "namespace": namespaceFlag, }) @@ -319,7 +340,7 @@ func run(ctx context.Context, manager Manager, initFunc Init, name string, confi data, err := proto.Marshal(&shimapi.DeleteResponse{ Pid: uint32(ss.Pid), ExitStatus: uint32(ss.ExitStatus), - ExitedAt: ss.ExitedAt, + ExitedAt: protobuf.ToTimestamp(ss.ExitedAt), }) if err != nil { return err @@ -330,9 +351,9 @@ func run(ctx context.Context, manager Manager, initFunc Init, name string, confi return nil case "start": opts := StartOpts{ - ContainerdBinary: containerdBinaryFlag, - Address: addressFlag, - TTRPCAddress: ttrpcAddress, + Address: addressFlag, + 
TTRPCAddress: ttrpcAddress,
+			Debug:        debugFlag,
 		}
 
 		address, err := manager.Start(ctx, id, opts)
@@ -395,14 +416,14 @@ func run(ctx context.Context, manager Manager, initFunc Init, name string, confi
 			initContext.TTRPCAddress = ttrpcAddress
 
 			// load the plugin specific configuration if it is provided
-			//TODO: Read configuration passed into shim, or from state directory?
-			//if p.Config != nil {
+			// TODO: Read configuration passed into shim, or from state directory?
+			// if p.Config != nil {
 			//	pc, err := config.Decode(p)
 			//	if err != nil {
 			//		return nil, err
 			//	}
 			//	initContext.Config = pc
-			//}
+			// }
 
 			result := p.Init(initContext)
 			if err := initialized.Add(result); err != nil {
@@ -445,7 +466,7 @@ func run(ctx context.Context, manager Manager, initFunc Init, name string, confi
 		}
 	}
 
-	if err := serve(ctx, server, signals, sd.Shutdown); err != nil { //nolint:staticcheck // Ignore SA4023 as some platforms always return error
+	if err := serve(ctx, server, signals, sd.Shutdown); err != nil {
 		if err != shutdown.ErrShutdown {
 			return err
 		}
@@ -477,17 +498,16 @@ func serve(ctx context.Context, server *ttrpc.Server, signals chan os.Signal, sh
 	}
 
 	l, err := serveListener(socketFlag)
-	if err != nil { //nolint:staticcheck // Ignore SA4023 as some platforms always return error
+	if err != nil {
 		return err
 	}
 	go func() {
 		defer l.Close()
-		if err := server.Serve(ctx, l); err != nil &&
-			!strings.Contains(err.Error(), "use of closed network connection") {
+		if err := server.Serve(ctx, l); err != nil && !errors.Is(err, net.ErrClosed) {
 			log.G(ctx).WithError(err).Fatal("containerd-shim: ttrpc server failure")
 		}
 	}()
-	logger := log.G(ctx).WithFields(logrus.Fields{
+	logger := log.G(ctx).WithFields(log.Fields{
 		"pid":       os.Getpid(),
 		"path":      path,
 		"namespace": namespaceFlag,
diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go
index e2dab0931ea6a..90a7dbb07202c 100644
--- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go
+++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go
@@ -1,5 +1,4 @@
 //go:build !windows
-// +build !windows
 
 /*
    Copyright The containerd Authors.
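The serve() hunk above replaces the fragile substring match on "use of closed network connection" with errors.Is(err, net.ErrClosed). A minimal standalone sketch, not part of the diff, assuming Go 1.16+ where the net package reports operations on a closed listener with net.ErrClosed, shows why the sentinel check is the robust replacement:

package main

import (
	"errors"
	"fmt"
	"net"
)

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	l.Close()

	// Accept fails because the listener is already closed; the returned error
	// wraps net.ErrClosed, so no string comparison on the message is needed.
	_, err = l.Accept()
	fmt.Println(errors.Is(err, net.ErrClosed)) // true
}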
@@ -72,7 +71,7 @@ func serveListener(path string) (net.Listener, error) {
 }
 
 func reap(ctx context.Context, logger *logrus.Entry, signals chan os.Signal) error {
-	logger.Info("starting signal loop")
+	logger.Debug("starting signal loop")
 
 	for {
 		select {
diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go
index 2add7ac33fed7..a290a06fb6246 100644
--- a/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go
+++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/shim_windows.go
@@ -18,41 +18,41 @@ package shim
 
 import (
 	"context"
-	"errors"
 	"io"
 	"net"
 	"os"
 
+	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/ttrpc"
 	"github.com/sirupsen/logrus"
 )
 
 func setupSignals(config Config) (chan os.Signal, error) {
-	return nil, errors.New("not supported")
+	return nil, errdefs.ErrNotImplemented
 }
 
 func newServer(opts ...ttrpc.ServerOpt) (*ttrpc.Server, error) {
-	return nil, errors.New("not supported")
+	return nil, errdefs.ErrNotImplemented
 }
 
 func subreaper() error {
-	return errors.New("not supported")
+	return errdefs.ErrNotImplemented
 }
 
 func setupDumpStacks(dump chan<- os.Signal) {
 }
 
 func serveListener(path string) (net.Listener, error) {
-	return nil, errors.New("not supported")
+	return nil, errdefs.ErrNotImplemented
 }
 
 func reap(ctx context.Context, logger *logrus.Entry, signals chan os.Signal) error {
-	return errors.New("not supported")
+	return errdefs.ErrNotImplemented
 }
 
 func handleExitSignals(ctx context.Context, logger *logrus.Entry, cancel context.CancelFunc) {
 }
 
 func openLog(ctx context.Context, _ string) (io.Writer, error) {
-	return nil, errors.New("not supported")
+	return nil, errdefs.ErrNotImplemented
 }
diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go
index 1a0d41f23172e..cb2b97f86e3ef 100644
--- a/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go
+++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/util.go
@@ -21,16 +21,19 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"io"
 	"net"
 	"os"
 	"path/filepath"
 	"strings"
 	"time"
 
+	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/namespaces"
+	"github.com/containerd/containerd/protobuf/proto"
+	"github.com/containerd/containerd/protobuf/types"
 	"github.com/containerd/ttrpc"
-	"github.com/gogo/protobuf/proto"
-	"github.com/gogo/protobuf/types"
+	"github.com/containerd/typeurl/v2"
 	exec "golang.org/x/sys/execabs"
 )
 
@@ -65,7 +68,10 @@ func Command(ctx context.Context, config *CommandConfig) (*exec.Cmd, error) {
 	cmd.Env = append(
 		os.Environ(),
 		"GOMAXPROCS=2",
+		fmt.Sprintf("%s=2", maxVersionEnv),
 		fmt.Sprintf("%s=%s", ttrpcAddressEnv, config.TTRPCAddress),
+		fmt.Sprintf("%s=%s", grpcAddressEnv, config.Address),
+		fmt.Sprintf("%s=%s", namespaceEnv, ns),
 	)
 	if config.SchedCore {
 		cmd.Env = append(cmd.Env, "SCHED_CORE=1")
@@ -86,7 +92,7 @@ func Command(ctx context.Context, config *CommandConfig) (*exec.Cmd, error) {
 func BinaryName(runtime string) string {
 	// runtime name should format like $prefix.name.version
 	parts := strings.Split(runtime, ".")
-	if len(parts) < 2 {
+	if len(parts) < 2 || parts[0] == "" {
 		return ""
 	}
 
@@ -169,6 +175,41 @@ func ReadAddress(path string) (string, error) {
 	return string(data), nil
 }
 
+// ReadRuntimeOptions reads config bytes from io.Reader and unmarshals it into the provided type.
+// The type must be registered with typeurl.
+//
+// The function will return ErrNotFound, if the config is not provided.
+// And ErrInvalidArgument, if unable to cast the config to the provided type T.
+func ReadRuntimeOptions[T any](reader io.Reader) (T, error) {
+	var config T
+
+	data, err := io.ReadAll(reader)
+	if err != nil {
+		return config, fmt.Errorf("failed to read config bytes from stdin: %w", err)
+	}
+
+	if len(data) == 0 {
+		return config, errdefs.ErrNotFound
+	}
+
+	var any types.Any
+	if err := proto.Unmarshal(data, &any); err != nil {
+		return config, err
+	}
+
+	v, err := typeurl.UnmarshalAny(&any)
+	if err != nil {
+		return config, err
+	}
+
+	config, ok := v.(T)
+	if !ok {
+		return config, fmt.Errorf("invalid type %T: %w", v, errdefs.ErrInvalidArgument)
+	}
+
+	return config, nil
+}
+
 // chainUnaryServerInterceptors creates a single ttrpc server interceptor from
 // a chain of many interceptors executed from first to last.
 func chainUnaryServerInterceptors(interceptors ...ttrpc.UnaryServerInterceptor) ttrpc.UnaryServerInterceptor {
diff --git a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go
index 4e2309a806950..ac470f914ccc2 100644
--- a/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go
+++ b/vendor/github.com/containerd/containerd/runtime/v2/shim/util_unix.go
@@ -1,5 +1,4 @@
 //go:build !windows
-// +build !windows
 
 /*
    Copyright The containerd Authors.
@@ -26,6 +25,7 @@ import (
 	"net"
 	"os"
 	"path/filepath"
+	"runtime"
 	"strings"
 	"syscall"
 	"time"
@@ -87,15 +87,20 @@ func AnonReconnectDialer(address string, timeout time.Duration) (net.Conn, error
 // NewSocket returns a new socket
 func NewSocket(address string) (*net.UnixListener, error) {
 	var (
-		sock = socket(address)
-		path = sock.path()
+		sock       = socket(address)
+		path       = sock.path()
+		isAbstract = sock.isAbstract()
+		perm       = os.FileMode(0600)
 	)
-	isAbstract := sock.isAbstract()
+	// Darwin needs +x to access socket, otherwise it'll fail with "bind: permission denied" when running as non-root.
+	if runtime.GOOS == "darwin" {
+		perm = 0700
+	}
 
 	if !isAbstract {
-		if err := os.MkdirAll(filepath.Dir(path), 0600); err != nil {
-			return nil, fmt.Errorf("%s: %w", path, err)
+		if err := os.MkdirAll(filepath.Dir(path), perm); err != nil {
+			return nil, fmt.Errorf("mkdir failed for %s: %w", path, err)
 		}
 	}
 	l, err := net.Listen("unix", path)
@@ -104,12 +109,13 @@ func NewSocket(address string) (*net.UnixListener, error) {
 	}
 
 	if !isAbstract {
-		if err := os.Chmod(path, 0600); err != nil {
+		if err := os.Chmod(path, perm); err != nil {
 			os.Remove(sock.path())
 			l.Close()
-			return nil, err
+			return nil, fmt.Errorf("chmod failed for %s: %w", path, err)
 		}
 	}
+
 	return l.(*net.UnixListener), nil
 }
diff --git a/vendor/github.com/containerd/containerd/runtime/v2/task/shim.pb.go b/vendor/github.com/containerd/containerd/runtime/v2/task/shim.pb.go
deleted file mode 100644
index 6366f9c57f478..0000000000000
--- a/vendor/github.com/containerd/containerd/runtime/v2/task/shim.pb.go
+++ /dev/null
@@ -1,7312 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
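A hypothetical caller of the new generic ReadRuntimeOptions helper added above, not part of the diff: a shim manager decoding *options.Options from stdin. It assumes the concrete options type is registered with typeurl, as the helper's doc comment requires, and treats errdefs.ErrNotFound as "no options supplied"; the field and package names come straight from this vendored code.

package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/runtime/v2/runc/options"
	"github.com/containerd/containerd/runtime/v2/shim"
)

func main() {
	// Read the runtime options that containerd wrote to our stdin.
	opts, err := shim.ReadRuntimeOptions[*options.Options](os.Stdin)
	if errors.Is(err, errdefs.ErrNotFound) {
		opts = &options.Options{} // nothing was passed; fall back to defaults
	} else if err != nil {
		fmt.Fprintln(os.Stderr, "reading runtime options:", err)
		os.Exit(1)
	}
	fmt.Println("runc binary:", opts.BinaryName)
}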
-// source: github.com/containerd/containerd/runtime/v2/task/shim.proto - -package task - -import ( - context "context" - fmt "fmt" - types "github.com/containerd/containerd/api/types" - task "github.com/containerd/containerd/api/types/task" - github_com_containerd_ttrpc "github.com/containerd/ttrpc" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types1 "github.com/gogo/protobuf/types" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" - time "time" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type CreateTaskRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Bundle string `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"` - Rootfs []*types.Mount `protobuf:"bytes,3,rep,name=rootfs,proto3" json:"rootfs,omitempty"` - Terminal bool `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"` - Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"` - Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"` - Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"` - Checkpoint string `protobuf:"bytes,8,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"` - ParentCheckpoint string `protobuf:"bytes,9,opt,name=parent_checkpoint,json=parentCheckpoint,proto3" json:"parent_checkpoint,omitempty"` - Options *types1.Any `protobuf:"bytes,10,opt,name=options,proto3" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CreateTaskRequest) Reset() { *m = CreateTaskRequest{} } -func (*CreateTaskRequest) ProtoMessage() {} -func (*CreateTaskRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{0} -} -func (m *CreateTaskRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CreateTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CreateTaskRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CreateTaskRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateTaskRequest.Merge(m, src) -} -func (m *CreateTaskRequest) XXX_Size() int { - return m.Size() -} -func (m *CreateTaskRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CreateTaskRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CreateTaskRequest proto.InternalMessageInfo - -type CreateTaskResponse struct { - Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CreateTaskResponse) Reset() { *m = CreateTaskResponse{} } -func (*CreateTaskResponse) ProtoMessage() {} -func (*CreateTaskResponse) Descriptor() 
([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{1} -} -func (m *CreateTaskResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CreateTaskResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CreateTaskResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CreateTaskResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateTaskResponse.Merge(m, src) -} -func (m *CreateTaskResponse) XXX_Size() int { - return m.Size() -} -func (m *CreateTaskResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CreateTaskResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_CreateTaskResponse proto.InternalMessageInfo - -type DeleteRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } -func (*DeleteRequest) ProtoMessage() {} -func (*DeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{2} -} -func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DeleteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteRequest.Merge(m, src) -} -func (m *DeleteRequest) XXX_Size() int { - return m.Size() -} -func (m *DeleteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo - -type DeleteResponse struct { - Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` - ExitStatus uint32 `protobuf:"varint,2,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` - ExitedAt time.Time `protobuf:"bytes,3,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteResponse) Reset() { *m = DeleteResponse{} } -func (*DeleteResponse) ProtoMessage() {} -func (*DeleteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{3} -} -func (m *DeleteResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DeleteResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteResponse.Merge(m, src) -} -func (m *DeleteResponse) XXX_Size() int { - return m.Size() -} -func (m *DeleteResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteResponse proto.InternalMessageInfo - -type ExecProcessRequest struct { - ID string 
`protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - Terminal bool `protobuf:"varint,3,opt,name=terminal,proto3" json:"terminal,omitempty"` - Stdin string `protobuf:"bytes,4,opt,name=stdin,proto3" json:"stdin,omitempty"` - Stdout string `protobuf:"bytes,5,opt,name=stdout,proto3" json:"stdout,omitempty"` - Stderr string `protobuf:"bytes,6,opt,name=stderr,proto3" json:"stderr,omitempty"` - Spec *types1.Any `protobuf:"bytes,7,opt,name=spec,proto3" json:"spec,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExecProcessRequest) Reset() { *m = ExecProcessRequest{} } -func (*ExecProcessRequest) ProtoMessage() {} -func (*ExecProcessRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{4} -} -func (m *ExecProcessRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExecProcessRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExecProcessRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExecProcessRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExecProcessRequest.Merge(m, src) -} -func (m *ExecProcessRequest) XXX_Size() int { - return m.Size() -} -func (m *ExecProcessRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ExecProcessRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ExecProcessRequest proto.InternalMessageInfo - -type ExecProcessResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExecProcessResponse) Reset() { *m = ExecProcessResponse{} } -func (*ExecProcessResponse) ProtoMessage() {} -func (*ExecProcessResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{5} -} -func (m *ExecProcessResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExecProcessResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExecProcessResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExecProcessResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExecProcessResponse.Merge(m, src) -} -func (m *ExecProcessResponse) XXX_Size() int { - return m.Size() -} -func (m *ExecProcessResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ExecProcessResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ExecProcessResponse proto.InternalMessageInfo - -type ResizePtyRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - Width uint32 `protobuf:"varint,3,opt,name=width,proto3" json:"width,omitempty"` - Height uint32 `protobuf:"varint,4,opt,name=height,proto3" json:"height,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResizePtyRequest) Reset() { *m = ResizePtyRequest{} } -func (*ResizePtyRequest) ProtoMessage() {} -func (*ResizePtyRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, 
[]int{6} -} -func (m *ResizePtyRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResizePtyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResizePtyRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResizePtyRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResizePtyRequest.Merge(m, src) -} -func (m *ResizePtyRequest) XXX_Size() int { - return m.Size() -} -func (m *ResizePtyRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ResizePtyRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ResizePtyRequest proto.InternalMessageInfo - -type StateRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StateRequest) Reset() { *m = StateRequest{} } -func (*StateRequest) ProtoMessage() {} -func (*StateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{7} -} -func (m *StateRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StateRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StateRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StateRequest.Merge(m, src) -} -func (m *StateRequest) XXX_Size() int { - return m.Size() -} -func (m *StateRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StateRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_StateRequest proto.InternalMessageInfo - -type StateResponse struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Bundle string `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"` - Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` - Status task.Status `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"` - Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"` - Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"` - Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"` - Terminal bool `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"` - ExitStatus uint32 `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` - ExitedAt time.Time `protobuf:"bytes,10,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"` - ExecID string `protobuf:"bytes,11,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StateResponse) Reset() { *m = StateResponse{} } -func (*StateResponse) ProtoMessage() {} -func (*StateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{8} -} -func (m *StateResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic 
{ - return xxx_messageInfo_StateResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StateResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StateResponse.Merge(m, src) -} -func (m *StateResponse) XXX_Size() int { - return m.Size() -} -func (m *StateResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StateResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_StateResponse proto.InternalMessageInfo - -type KillRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - Signal uint32 `protobuf:"varint,3,opt,name=signal,proto3" json:"signal,omitempty"` - All bool `protobuf:"varint,4,opt,name=all,proto3" json:"all,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *KillRequest) Reset() { *m = KillRequest{} } -func (*KillRequest) ProtoMessage() {} -func (*KillRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{9} -} -func (m *KillRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *KillRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_KillRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *KillRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_KillRequest.Merge(m, src) -} -func (m *KillRequest) XXX_Size() int { - return m.Size() -} -func (m *KillRequest) XXX_DiscardUnknown() { - xxx_messageInfo_KillRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_KillRequest proto.InternalMessageInfo - -type CloseIORequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - Stdin bool `protobuf:"varint,3,opt,name=stdin,proto3" json:"stdin,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CloseIORequest) Reset() { *m = CloseIORequest{} } -func (*CloseIORequest) ProtoMessage() {} -func (*CloseIORequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{10} -} -func (m *CloseIORequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CloseIORequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CloseIORequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CloseIORequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CloseIORequest.Merge(m, src) -} -func (m *CloseIORequest) XXX_Size() int { - return m.Size() -} -func (m *CloseIORequest) XXX_DiscardUnknown() { - xxx_messageInfo_CloseIORequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CloseIORequest proto.InternalMessageInfo - -type PidsRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PidsRequest) Reset() { *m = PidsRequest{} } -func (*PidsRequest) 
ProtoMessage() {} -func (*PidsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{11} -} -func (m *PidsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PidsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PidsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PidsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PidsRequest.Merge(m, src) -} -func (m *PidsRequest) XXX_Size() int { - return m.Size() -} -func (m *PidsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PidsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_PidsRequest proto.InternalMessageInfo - -type PidsResponse struct { - Processes []*task.ProcessInfo `protobuf:"bytes,1,rep,name=processes,proto3" json:"processes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PidsResponse) Reset() { *m = PidsResponse{} } -func (*PidsResponse) ProtoMessage() {} -func (*PidsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{12} -} -func (m *PidsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PidsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PidsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PidsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_PidsResponse.Merge(m, src) -} -func (m *PidsResponse) XXX_Size() int { - return m.Size() -} -func (m *PidsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_PidsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_PidsResponse proto.InternalMessageInfo - -type CheckpointTaskRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` - Options *types1.Any `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CheckpointTaskRequest) Reset() { *m = CheckpointTaskRequest{} } -func (*CheckpointTaskRequest) ProtoMessage() {} -func (*CheckpointTaskRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{13} -} -func (m *CheckpointTaskRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CheckpointTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CheckpointTaskRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CheckpointTaskRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CheckpointTaskRequest.Merge(m, src) -} -func (m *CheckpointTaskRequest) XXX_Size() int { - return m.Size() -} -func (m *CheckpointTaskRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CheckpointTaskRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CheckpointTaskRequest proto.InternalMessageInfo - -type UpdateTaskRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Resources 
*types1.Any `protobuf:"bytes,2,opt,name=resources,proto3" json:"resources,omitempty"` - Annotations map[string]string `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UpdateTaskRequest) Reset() { *m = UpdateTaskRequest{} } -func (*UpdateTaskRequest) ProtoMessage() {} -func (*UpdateTaskRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{14} -} -func (m *UpdateTaskRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UpdateTaskRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UpdateTaskRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UpdateTaskRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateTaskRequest.Merge(m, src) -} -func (m *UpdateTaskRequest) XXX_Size() int { - return m.Size() -} -func (m *UpdateTaskRequest) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateTaskRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_UpdateTaskRequest proto.InternalMessageInfo - -type StartRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StartRequest) Reset() { *m = StartRequest{} } -func (*StartRequest) ProtoMessage() {} -func (*StartRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{15} -} -func (m *StartRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StartRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StartRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StartRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StartRequest.Merge(m, src) -} -func (m *StartRequest) XXX_Size() int { - return m.Size() -} -func (m *StartRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StartRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_StartRequest proto.InternalMessageInfo - -type StartResponse struct { - Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StartResponse) Reset() { *m = StartResponse{} } -func (*StartResponse) ProtoMessage() {} -func (*StartResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{16} -} -func (m *StartResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StartResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StartResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StartResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StartResponse.Merge(m, src) -} 
-func (m *StartResponse) XXX_Size() int { - return m.Size() -} -func (m *StartResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StartResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_StartResponse proto.InternalMessageInfo - -type WaitRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - ExecID string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WaitRequest) Reset() { *m = WaitRequest{} } -func (*WaitRequest) ProtoMessage() {} -func (*WaitRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{17} -} -func (m *WaitRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WaitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WaitRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WaitRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WaitRequest.Merge(m, src) -} -func (m *WaitRequest) XXX_Size() int { - return m.Size() -} -func (m *WaitRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WaitRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_WaitRequest proto.InternalMessageInfo - -type WaitResponse struct { - ExitStatus uint32 `protobuf:"varint,1,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"` - ExitedAt time.Time `protobuf:"bytes,2,opt,name=exited_at,json=exitedAt,proto3,stdtime" json:"exited_at"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WaitResponse) Reset() { *m = WaitResponse{} } -func (*WaitResponse) ProtoMessage() {} -func (*WaitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{18} -} -func (m *WaitResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WaitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WaitResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WaitResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_WaitResponse.Merge(m, src) -} -func (m *WaitResponse) XXX_Size() int { - return m.Size() -} -func (m *WaitResponse) XXX_DiscardUnknown() { - xxx_messageInfo_WaitResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_WaitResponse proto.InternalMessageInfo - -type StatsRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StatsRequest) Reset() { *m = StatsRequest{} } -func (*StatsRequest) ProtoMessage() {} -func (*StatsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{19} -} -func (m *StatsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StatsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func 
(m *StatsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatsRequest.Merge(m, src) -} -func (m *StatsRequest) XXX_Size() int { - return m.Size() -} -func (m *StatsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StatsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_StatsRequest proto.InternalMessageInfo - -type StatsResponse struct { - Stats *types1.Any `protobuf:"bytes,1,opt,name=stats,proto3" json:"stats,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StatsResponse) Reset() { *m = StatsResponse{} } -func (*StatsResponse) ProtoMessage() {} -func (*StatsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{20} -} -func (m *StatsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StatsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StatsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatsResponse.Merge(m, src) -} -func (m *StatsResponse) XXX_Size() int { - return m.Size() -} -func (m *StatsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StatsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_StatsResponse proto.InternalMessageInfo - -type ConnectRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConnectRequest) Reset() { *m = ConnectRequest{} } -func (*ConnectRequest) ProtoMessage() {} -func (*ConnectRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{21} -} -func (m *ConnectRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ConnectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ConnectRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ConnectRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConnectRequest.Merge(m, src) -} -func (m *ConnectRequest) XXX_Size() int { - return m.Size() -} -func (m *ConnectRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ConnectRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ConnectRequest proto.InternalMessageInfo - -type ConnectResponse struct { - ShimPid uint32 `protobuf:"varint,1,opt,name=shim_pid,json=shimPid,proto3" json:"shim_pid,omitempty"` - TaskPid uint32 `protobuf:"varint,2,opt,name=task_pid,json=taskPid,proto3" json:"task_pid,omitempty"` - Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ConnectResponse) Reset() { *m = ConnectResponse{} } -func (*ConnectResponse) ProtoMessage() {} -func (*ConnectResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{22} -} -func (m *ConnectResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ConnectResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return 
xxx_messageInfo_ConnectResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ConnectResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConnectResponse.Merge(m, src) -} -func (m *ConnectResponse) XXX_Size() int { - return m.Size() -} -func (m *ConnectResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ConnectResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ConnectResponse proto.InternalMessageInfo - -type ShutdownRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Now bool `protobuf:"varint,2,opt,name=now,proto3" json:"now,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ShutdownRequest) Reset() { *m = ShutdownRequest{} } -func (*ShutdownRequest) ProtoMessage() {} -func (*ShutdownRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{23} -} -func (m *ShutdownRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ShutdownRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ShutdownRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ShutdownRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ShutdownRequest.Merge(m, src) -} -func (m *ShutdownRequest) XXX_Size() int { - return m.Size() -} -func (m *ShutdownRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ShutdownRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ShutdownRequest proto.InternalMessageInfo - -type PauseRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PauseRequest) Reset() { *m = PauseRequest{} } -func (*PauseRequest) ProtoMessage() {} -func (*PauseRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{24} -} -func (m *PauseRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PauseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PauseRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PauseRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PauseRequest.Merge(m, src) -} -func (m *PauseRequest) XXX_Size() int { - return m.Size() -} -func (m *PauseRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PauseRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_PauseRequest proto.InternalMessageInfo - -type ResumeRequest struct { - ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResumeRequest) Reset() { *m = ResumeRequest{} } -func (*ResumeRequest) ProtoMessage() {} -func (*ResumeRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9202ee34bc3ad8ca, []int{25} -} -func (m *ResumeRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResumeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - 
return xxx_messageInfo_ResumeRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResumeRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResumeRequest.Merge(m, src) -} -func (m *ResumeRequest) XXX_Size() int { - return m.Size() -} -func (m *ResumeRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ResumeRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ResumeRequest proto.InternalMessageInfo - -func init() { - proto.RegisterType((*CreateTaskRequest)(nil), "containerd.task.v2.CreateTaskRequest") - proto.RegisterType((*CreateTaskResponse)(nil), "containerd.task.v2.CreateTaskResponse") - proto.RegisterType((*DeleteRequest)(nil), "containerd.task.v2.DeleteRequest") - proto.RegisterType((*DeleteResponse)(nil), "containerd.task.v2.DeleteResponse") - proto.RegisterType((*ExecProcessRequest)(nil), "containerd.task.v2.ExecProcessRequest") - proto.RegisterType((*ExecProcessResponse)(nil), "containerd.task.v2.ExecProcessResponse") - proto.RegisterType((*ResizePtyRequest)(nil), "containerd.task.v2.ResizePtyRequest") - proto.RegisterType((*StateRequest)(nil), "containerd.task.v2.StateRequest") - proto.RegisterType((*StateResponse)(nil), "containerd.task.v2.StateResponse") - proto.RegisterType((*KillRequest)(nil), "containerd.task.v2.KillRequest") - proto.RegisterType((*CloseIORequest)(nil), "containerd.task.v2.CloseIORequest") - proto.RegisterType((*PidsRequest)(nil), "containerd.task.v2.PidsRequest") - proto.RegisterType((*PidsResponse)(nil), "containerd.task.v2.PidsResponse") - proto.RegisterType((*CheckpointTaskRequest)(nil), "containerd.task.v2.CheckpointTaskRequest") - proto.RegisterType((*UpdateTaskRequest)(nil), "containerd.task.v2.UpdateTaskRequest") - proto.RegisterMapType((map[string]string)(nil), "containerd.task.v2.UpdateTaskRequest.AnnotationsEntry") - proto.RegisterType((*StartRequest)(nil), "containerd.task.v2.StartRequest") - proto.RegisterType((*StartResponse)(nil), "containerd.task.v2.StartResponse") - proto.RegisterType((*WaitRequest)(nil), "containerd.task.v2.WaitRequest") - proto.RegisterType((*WaitResponse)(nil), "containerd.task.v2.WaitResponse") - proto.RegisterType((*StatsRequest)(nil), "containerd.task.v2.StatsRequest") - proto.RegisterType((*StatsResponse)(nil), "containerd.task.v2.StatsResponse") - proto.RegisterType((*ConnectRequest)(nil), "containerd.task.v2.ConnectRequest") - proto.RegisterType((*ConnectResponse)(nil), "containerd.task.v2.ConnectResponse") - proto.RegisterType((*ShutdownRequest)(nil), "containerd.task.v2.ShutdownRequest") - proto.RegisterType((*PauseRequest)(nil), "containerd.task.v2.PauseRequest") - proto.RegisterType((*ResumeRequest)(nil), "containerd.task.v2.ResumeRequest") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/runtime/v2/task/shim.proto", fileDescriptor_9202ee34bc3ad8ca) -} - -var fileDescriptor_9202ee34bc3ad8ca = []byte{ - // 1306 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x58, 0x4d, 0x6f, 0xdb, 0x46, - 0x13, 0x0e, 0xf5, 0x41, 0x49, 0xa3, 0xc8, 0x71, 0xf6, 0x75, 0xf2, 0x32, 0x0a, 0x20, 0x29, 0x4c, - 0x93, 0xaa, 0x2d, 0x40, 0xa1, 0x0a, 0x1a, 0x14, 0x31, 0x90, 0xc2, 0x76, 0xdc, 0x40, 0x4d, 0x5a, - 0x1b, 0x4c, 0x8a, 0x04, 0xbd, 0x18, 0xb4, 0xb8, 0x91, 0x08, 0x4b, 0x5c, 0x96, 0xbb, 0x74, 0xa2, - 0x02, 0x05, 0x7a, 0xea, 0xa1, 0xa7, 0xfe, 0xac, 0x1c, 0x0b, 0xf4, 0xd2, 0x4b, 0xd3, 0x46, 0xff, - 0xa0, 0xc7, 0xde, 0x8a, 0xfd, 
0x90, 0x45, 0x49, 0xa4, 0x14, 0x07, 0xba, 0x18, 0x3b, 0xdc, 0x67, - 0x67, 0x67, 0x67, 0x9f, 0x79, 0x66, 0x65, 0xd8, 0xee, 0x79, 0xac, 0x1f, 0x1d, 0x5b, 0x5d, 0x32, - 0x6c, 0x75, 0x89, 0xcf, 0x1c, 0xcf, 0xc7, 0xa1, 0x1b, 0x1f, 0x86, 0x91, 0xcf, 0xbc, 0x21, 0x6e, - 0x9d, 0xb6, 0x5b, 0xcc, 0xa1, 0x27, 0x2d, 0xda, 0xf7, 0x86, 0x56, 0x10, 0x12, 0x46, 0x10, 0x9a, - 0xc2, 0x2c, 0x3e, 0x67, 0x9d, 0xb6, 0xab, 0xd7, 0x7a, 0x84, 0xf4, 0x06, 0xb8, 0x25, 0x10, 0xc7, - 0xd1, 0x8b, 0x96, 0xe3, 0x8f, 0x24, 0xbc, 0x7a, 0x7d, 0x7e, 0x0a, 0x0f, 0x03, 0x36, 0x99, 0xdc, - 0xea, 0x91, 0x1e, 0x11, 0xc3, 0x16, 0x1f, 0xa9, 0xaf, 0xf5, 0xf9, 0x25, 0x3c, 0x14, 0xca, 0x9c, - 0x61, 0xa0, 0x00, 0x77, 0x57, 0xc6, 0xef, 0x04, 0x5e, 0x8b, 0x8d, 0x02, 0x4c, 0x5b, 0x43, 0x12, - 0xf9, 0x4c, 0xad, 0xbb, 0x77, 0x8e, 0x75, 0xe2, 0xd8, 0xe2, 0x7c, 0x62, 0xad, 0xf9, 0x7b, 0x06, - 0x2e, 0xef, 0x85, 0xd8, 0x61, 0xf8, 0xa9, 0x43, 0x4f, 0x6c, 0xfc, 0x7d, 0x84, 0x29, 0x43, 0x57, - 0x21, 0xe3, 0xb9, 0x86, 0xd6, 0xd0, 0x9a, 0xa5, 0x5d, 0x7d, 0xfc, 0xa6, 0x9e, 0xe9, 0x3c, 0xb0, - 0x33, 0x9e, 0x8b, 0xae, 0x82, 0x7e, 0x1c, 0xf9, 0xee, 0x00, 0x1b, 0x19, 0x3e, 0x67, 0x2b, 0x0b, - 0xb5, 0x40, 0x0f, 0x09, 0x61, 0x2f, 0xa8, 0x91, 0x6d, 0x64, 0x9b, 0xe5, 0xf6, 0xff, 0xad, 0x78, - 0x36, 0xf9, 0xc6, 0xd6, 0xd7, 0x3c, 0x60, 0x5b, 0xc1, 0x50, 0x15, 0x8a, 0x0c, 0x87, 0x43, 0xcf, - 0x77, 0x06, 0x46, 0xae, 0xa1, 0x35, 0x8b, 0xf6, 0x99, 0x8d, 0xb6, 0x20, 0x4f, 0x99, 0xeb, 0xf9, - 0x46, 0x5e, 0xec, 0x21, 0x0d, 0xbe, 0x35, 0x65, 0x2e, 0x89, 0x98, 0xa1, 0xcb, 0xad, 0xa5, 0xa5, - 0xbe, 0xe3, 0x30, 0x34, 0x0a, 0x67, 0xdf, 0x71, 0x18, 0xa2, 0x1a, 0x40, 0xb7, 0x8f, 0xbb, 0x27, - 0x01, 0xf1, 0x7c, 0x66, 0x14, 0xc5, 0x5c, 0xec, 0x0b, 0xfa, 0x04, 0x2e, 0x07, 0x4e, 0x88, 0x7d, - 0x76, 0x14, 0x83, 0x95, 0x04, 0x6c, 0x53, 0x4e, 0xec, 0x4d, 0xc1, 0x16, 0x14, 0x48, 0xc0, 0x3c, - 0xe2, 0x53, 0x03, 0x1a, 0x5a, 0xb3, 0xdc, 0xde, 0xb2, 0xe4, 0x65, 0x5a, 0x93, 0xcb, 0xb4, 0x76, - 0xfc, 0x91, 0x3d, 0x01, 0x99, 0xb7, 0x01, 0xc5, 0x93, 0x4a, 0x03, 0xe2, 0x53, 0x8c, 0x36, 0x21, - 0x1b, 0xa8, 0xb4, 0x56, 0x6c, 0x3e, 0x34, 0x1f, 0x43, 0xe5, 0x01, 0x1e, 0x60, 0x86, 0x57, 0x25, - 0xfe, 0x26, 0x14, 0xf0, 0x2b, 0xdc, 0x3d, 0xf2, 0x5c, 0x99, 0xf9, 0x5d, 0x18, 0xbf, 0xa9, 0xeb, - 0xfb, 0xaf, 0x70, 0xb7, 0xf3, 0xc0, 0xd6, 0xf9, 0x54, 0xc7, 0x35, 0x7f, 0xd6, 0x60, 0x63, 0xe2, - 0x2e, 0x6d, 0x4b, 0x54, 0x87, 0x32, 0x7e, 0xe5, 0xb1, 0x23, 0xca, 0x1c, 0x16, 0x51, 0xe1, 0xad, - 0x62, 0x03, 0xff, 0xf4, 0x44, 0x7c, 0x41, 0x3b, 0x50, 0xe2, 0x16, 0x76, 0x8f, 0x1c, 0x66, 0x64, - 0xc5, 0x69, 0xab, 0x0b, 0xa7, 0x7d, 0x3a, 0xa1, 0xee, 0x6e, 0xf1, 0xf5, 0x9b, 0xfa, 0x85, 0x5f, - 0xff, 0xaa, 0x6b, 0x76, 0x51, 0x2e, 0xdb, 0x61, 0xe6, 0x9f, 0x1a, 0x20, 0x1e, 0xdb, 0x61, 0x48, - 0xba, 0x98, 0xd2, 0x75, 0x1c, 0x6e, 0x86, 0x31, 0xd9, 0x34, 0xc6, 0xe4, 0x92, 0x19, 0x93, 0x4f, - 0x61, 0x8c, 0x3e, 0xc3, 0x98, 0x26, 0xe4, 0x68, 0x80, 0xbb, 0x82, 0x47, 0x69, 0x37, 0x2c, 0x10, - 0xe6, 0x15, 0xf8, 0xdf, 0xcc, 0xf1, 0x64, 0xb2, 0xcd, 0x1f, 0x61, 0xd3, 0xc6, 0xd4, 0xfb, 0x01, - 0x1f, 0xb2, 0xd1, 0x5a, 0xce, 0xbc, 0x05, 0xf9, 0x97, 0x9e, 0xcb, 0xfa, 0xe2, 0xc0, 0x15, 0x5b, - 0x1a, 0x3c, 0xfe, 0x3e, 0xf6, 0x7a, 0x7d, 0x26, 0x8e, 0x5b, 0xb1, 0x95, 0x65, 0x3e, 0x82, 0x8b, - 0xfc, 0x0a, 0xd7, 0xc3, 0xa5, 0x7f, 0x32, 0x50, 0x51, 0xde, 0x14, 0x95, 0xce, 0xab, 0x09, 0x8a, - 0x7a, 0xd9, 0x29, 0xf5, 0xee, 0xf0, 0xc4, 0x0b, 0xd6, 0xf1, 0xc0, 0x37, 0xda, 0xd7, 0xe3, 0x2a, - 0x71, 0xfa, 0xa9, 0x12, 0x0a, 0x49, 0x43, 0x5b, 0x41, 0xd7, 0xa4, 0x06, 0x71, 0xf6, 0x14, 0xe7, - 0xd8, 0x33, 0x57, 0x11, 0xa5, 0xe5, 0x15, 0x01, 0xef, 
0x53, 0x11, 0xf1, 0x9c, 0x97, 0x53, 0x73, - 0xce, 0xa0, 0xfc, 0xc8, 0x1b, 0x0c, 0xd6, 0x42, 0x1d, 0x9e, 0x08, 0xaf, 0x37, 0x29, 0x96, 0x8a, - 0xad, 0x2c, 0x7e, 0x2b, 0xce, 0x60, 0xa2, 0xb9, 0x7c, 0x68, 0x76, 0x61, 0x63, 0x6f, 0x40, 0x28, - 0xee, 0x1c, 0xac, 0x8b, 0xb3, 0xf2, 0xbe, 0x64, 0x91, 0x4a, 0xc3, 0xbc, 0x05, 0xe5, 0x43, 0xcf, - 0x5d, 0xa5, 0x04, 0xe6, 0x37, 0x70, 0x51, 0xc2, 0x14, 0xe7, 0xee, 0x43, 0x29, 0x90, 0x45, 0x86, - 0xa9, 0xa1, 0x89, 0xd6, 0xd2, 0x48, 0x24, 0x8d, 0x2a, 0xc5, 0x8e, 0xff, 0x82, 0xd8, 0xd3, 0x25, - 0x26, 0x85, 0x2b, 0x53, 0x15, 0x7f, 0x97, 0x06, 0x87, 0x20, 0x17, 0x38, 0xac, 0xaf, 0xa8, 0x2c, - 0xc6, 0x71, 0xf1, 0xcf, 0xbe, 0x8b, 0xf8, 0xff, 0xab, 0xc1, 0xe5, 0x6f, 0x03, 0xf7, 0x1d, 0x5b, - 0x6a, 0x1b, 0x4a, 0x21, 0xa6, 0x24, 0x0a, 0xbb, 0x58, 0xaa, 0x71, 0x9a, 0xff, 0x29, 0x0c, 0x3d, - 0x87, 0xb2, 0xe3, 0xfb, 0x84, 0x39, 0x93, 0xa8, 0x78, 0x62, 0xee, 0x5a, 0x8b, 0x2f, 0x18, 0x6b, - 0x21, 0x0e, 0x6b, 0x67, 0xba, 0x70, 0xdf, 0x67, 0xe1, 0xc8, 0x8e, 0xbb, 0xaa, 0xde, 0x87, 0xcd, - 0x79, 0x00, 0xa7, 0xcc, 0x09, 0x1e, 0xc9, 0xd0, 0x6d, 0x3e, 0xe4, 0x77, 0x7c, 0xea, 0x0c, 0xa2, - 0x49, 0xc5, 0x4b, 0xe3, 0x5e, 0xe6, 0x73, 0x4d, 0x69, 0x50, 0xc8, 0xd6, 0xa2, 0x41, 0x37, 0x84, - 0x04, 0x71, 0x67, 0xa9, 0x0d, 0xf4, 0x2b, 0x28, 0x3f, 0x73, 0xbc, 0xf5, 0x6c, 0x17, 0xc2, 0x45, - 0xe9, 0x4b, 0xed, 0x36, 0xa7, 0x0b, 0xda, 0x72, 0x5d, 0xc8, 0xbc, 0x57, 0xa7, 0xbc, 0x2d, 0x35, - 0x7b, 0x65, 0x61, 0x6c, 0x4b, 0x35, 0x9e, 0x56, 0xc6, 0xc7, 0xbc, 0xcc, 0x1c, 0x26, 0xc3, 0x4a, - 0xa3, 0x8c, 0x84, 0x98, 0x4d, 0xd8, 0xd8, 0x23, 0xbe, 0x8f, 0xbb, 0xab, 0xf2, 0x64, 0x3a, 0x70, - 0xe9, 0x0c, 0xa9, 0x36, 0xba, 0x06, 0x45, 0xfe, 0x4a, 0x3e, 0x9a, 0x26, 0xbe, 0xc0, 0xed, 0x43, - 0xcf, 0xe5, 0x53, 0x9c, 0x67, 0x62, 0x4a, 0xbe, 0x23, 0x0a, 0xdc, 0xe6, 0x53, 0x06, 0x14, 0x4e, - 0x71, 0x48, 0x3d, 0x22, 0x75, 0xa0, 0x64, 0x4f, 0x4c, 0x73, 0x1b, 0x2e, 0x3d, 0xe9, 0x47, 0xcc, - 0x25, 0x2f, 0xfd, 0x55, 0xb7, 0xb6, 0x09, 0x59, 0x9f, 0xbc, 0x14, 0xae, 0x8b, 0x36, 0x1f, 0xf2, - 0x74, 0x1d, 0x3a, 0x11, 0x5d, 0xd5, 0xe2, 0xcc, 0x0f, 0xa1, 0x62, 0x63, 0x1a, 0x0d, 0x57, 0x01, - 0xdb, 0xbf, 0x00, 0xe4, 0x78, 0x75, 0xa0, 0xc7, 0x90, 0x17, 0xed, 0x0e, 0x35, 0x92, 0xca, 0x28, - 0xde, 0x57, 0xab, 0x37, 0x96, 0x20, 0x54, 0xd2, 0x9e, 0x81, 0x2e, 0xdf, 0x7f, 0xe8, 0x56, 0x12, - 0x78, 0xe1, 0xc1, 0x5d, 0xbd, 0xbd, 0x0a, 0xa6, 0x1c, 0xcb, 0x30, 0x43, 0x96, 0x1a, 0xe6, 0x59, - 0xe9, 0xa5, 0x86, 0x19, 0xab, 0xa7, 0x03, 0xd0, 0xe5, 0x7b, 0x11, 0x25, 0x82, 0x67, 0x9e, 0xa6, - 0x55, 0x73, 0x19, 0x44, 0x39, 0xec, 0x40, 0x8e, 0xeb, 0x37, 0xaa, 0x27, 0x61, 0x63, 0x0d, 0xa0, - 0xda, 0x48, 0x07, 0x28, 0x57, 0x3b, 0x90, 0x17, 0x57, 0x9d, 0x7c, 0xd2, 0x38, 0x0b, 0xaa, 0x57, - 0x17, 0xc8, 0xbf, 0xcf, 0x7f, 0x8c, 0xa1, 0x3d, 0xd0, 0x25, 0x0b, 0x92, 0x8f, 0x37, 0xc3, 0x90, - 0x54, 0x27, 0x07, 0x00, 0xb1, 0x1f, 0x02, 0x1f, 0x25, 0xde, 0x53, 0x52, 0x8b, 0x49, 0x75, 0xf8, - 0x05, 0xe4, 0x78, 0x97, 0x4f, 0xce, 0x51, 0xac, 0xff, 0xa7, 0x3a, 0xf8, 0x12, 0x72, 0x5c, 0xb9, - 0x50, 0x22, 0x67, 0x16, 0x9f, 0xdd, 0xa9, 0x7e, 0x3a, 0x50, 0x3a, 0x7b, 0xae, 0xa2, 0x0f, 0x52, - 0x32, 0x34, 0xf3, 0x9a, 0x4d, 0x75, 0xb5, 0x0f, 0x05, 0xf5, 0x86, 0x40, 0x89, 0x34, 0x99, 0x7d, - 0x60, 0xa4, 0xba, 0x79, 0x08, 0xba, 0x6c, 0x58, 0xc9, 0x65, 0xb3, 0xd0, 0xcc, 0x96, 0x1c, 0x2d, - 0xc7, 0xa5, 0x3c, 0x39, 0xc7, 0xb1, 0x86, 0x91, 0xcc, 0xc3, 0x99, 0x2e, 0xa0, 0x84, 0x81, 0xa6, - 0x0b, 0x03, 0x5d, 0x29, 0x0c, 0x53, 0x56, 0xdb, 0x50, 0x50, 0x02, 0x9b, 0x92, 0xa8, 0x19, 0x9d, - 0xae, 0xde, 0x5c, 0x8a, 0x51, 0x3e, 0x1f, 0x42, 0x71, 0xa2, 0xa8, 0x28, 0x71, 
0xc1, 0x9c, 0xde, - 0xa6, 0x65, 0x6d, 0xf7, 0xe0, 0xf5, 0xdb, 0xda, 0x85, 0x3f, 0xde, 0xd6, 0x2e, 0xfc, 0x34, 0xae, - 0x69, 0xaf, 0xc7, 0x35, 0xed, 0xb7, 0x71, 0x4d, 0xfb, 0x7b, 0x5c, 0xd3, 0xbe, 0xfb, 0xec, 0xbc, - 0xff, 0x59, 0xd9, 0xe6, 0x7f, 0x9e, 0x67, 0x8e, 0x75, 0xb1, 0xc5, 0x9d, 0xff, 0x02, 0x00, 0x00, - 0xff, 0xff, 0xd3, 0xbf, 0xc3, 0xa9, 0x9b, 0x11, 0x00, 0x00, -} - -func (m *CreateTaskRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CreateTaskRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CreateTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Options != nil { - { - size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintShim(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - } - if len(m.ParentCheckpoint) > 0 { - i -= len(m.ParentCheckpoint) - copy(dAtA[i:], m.ParentCheckpoint) - i = encodeVarintShim(dAtA, i, uint64(len(m.ParentCheckpoint))) - i-- - dAtA[i] = 0x4a - } - if len(m.Checkpoint) > 0 { - i -= len(m.Checkpoint) - copy(dAtA[i:], m.Checkpoint) - i = encodeVarintShim(dAtA, i, uint64(len(m.Checkpoint))) - i-- - dAtA[i] = 0x42 - } - if len(m.Stderr) > 0 { - i -= len(m.Stderr) - copy(dAtA[i:], m.Stderr) - i = encodeVarintShim(dAtA, i, uint64(len(m.Stderr))) - i-- - dAtA[i] = 0x3a - } - if len(m.Stdout) > 0 { - i -= len(m.Stdout) - copy(dAtA[i:], m.Stdout) - i = encodeVarintShim(dAtA, i, uint64(len(m.Stdout))) - i-- - dAtA[i] = 0x32 - } - if len(m.Stdin) > 0 { - i -= len(m.Stdin) - copy(dAtA[i:], m.Stdin) - i = encodeVarintShim(dAtA, i, uint64(len(m.Stdin))) - i-- - dAtA[i] = 0x2a - } - if m.Terminal { - i-- - if m.Terminal { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if len(m.Rootfs) > 0 { - for iNdEx := len(m.Rootfs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Rootfs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintShim(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Bundle) > 0 { - i -= len(m.Bundle) - copy(dAtA[i:], m.Bundle) - i = encodeVarintShim(dAtA, i, uint64(len(m.Bundle))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CreateTaskResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CreateTaskResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CreateTaskResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Pid != 0 { - i = encodeVarintShim(dAtA, i, uint64(m.Pid)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *DeleteRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) 
- n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *DeleteResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt):]) - if err2 != nil { - return 0, err2 - } - i -= n2 - i = encodeVarintShim(dAtA, i, uint64(n2)) - i-- - dAtA[i] = 0x1a - if m.ExitStatus != 0 { - i = encodeVarintShim(dAtA, i, uint64(m.ExitStatus)) - i-- - dAtA[i] = 0x10 - } - if m.Pid != 0 { - i = encodeVarintShim(dAtA, i, uint64(m.Pid)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ExecProcessRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExecProcessRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExecProcessRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Spec != nil { - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintShim(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - if len(m.Stderr) > 0 { - i -= len(m.Stderr) - copy(dAtA[i:], m.Stderr) - i = encodeVarintShim(dAtA, i, uint64(len(m.Stderr))) - i-- - dAtA[i] = 0x32 - } - if len(m.Stdout) > 0 { - i -= len(m.Stdout) - copy(dAtA[i:], m.Stdout) - i = encodeVarintShim(dAtA, i, uint64(len(m.Stdout))) - i-- - dAtA[i] = 0x2a - } - if len(m.Stdin) > 0 { - i -= len(m.Stdin) - copy(dAtA[i:], m.Stdin) - i = encodeVarintShim(dAtA, i, uint64(len(m.Stdin))) - i-- - dAtA[i] = 0x22 - } - if m.Terminal { - i-- - if m.Terminal { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return 
len(dAtA) - i, nil -} - -func (m *ExecProcessResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExecProcessResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExecProcessResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *ResizePtyRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResizePtyRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResizePtyRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Height != 0 { - i = encodeVarintShim(dAtA, i, uint64(m.Height)) - i-- - dAtA[i] = 0x20 - } - if m.Width != 0 { - i = encodeVarintShim(dAtA, i, uint64(m.Width)) - i-- - dAtA[i] = 0x18 - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *StateRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StateRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *StateResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StateResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x5a - } - n4, err4 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt):]) - if 
err4 != nil { - return 0, err4 - } - i -= n4 - i = encodeVarintShim(dAtA, i, uint64(n4)) - i-- - dAtA[i] = 0x52 - if m.ExitStatus != 0 { - i = encodeVarintShim(dAtA, i, uint64(m.ExitStatus)) - i-- - dAtA[i] = 0x48 - } - if m.Terminal { - i-- - if m.Terminal { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x40 - } - if len(m.Stderr) > 0 { - i -= len(m.Stderr) - copy(dAtA[i:], m.Stderr) - i = encodeVarintShim(dAtA, i, uint64(len(m.Stderr))) - i-- - dAtA[i] = 0x3a - } - if len(m.Stdout) > 0 { - i -= len(m.Stdout) - copy(dAtA[i:], m.Stdout) - i = encodeVarintShim(dAtA, i, uint64(len(m.Stdout))) - i-- - dAtA[i] = 0x32 - } - if len(m.Stdin) > 0 { - i -= len(m.Stdin) - copy(dAtA[i:], m.Stdin) - i = encodeVarintShim(dAtA, i, uint64(len(m.Stdin))) - i-- - dAtA[i] = 0x2a - } - if m.Status != 0 { - i = encodeVarintShim(dAtA, i, uint64(m.Status)) - i-- - dAtA[i] = 0x20 - } - if m.Pid != 0 { - i = encodeVarintShim(dAtA, i, uint64(m.Pid)) - i-- - dAtA[i] = 0x18 - } - if len(m.Bundle) > 0 { - i -= len(m.Bundle) - copy(dAtA[i:], m.Bundle) - i = encodeVarintShim(dAtA, i, uint64(len(m.Bundle))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *KillRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *KillRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *KillRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.All { - i-- - if m.All { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if m.Signal != 0 { - i = encodeVarintShim(dAtA, i, uint64(m.Signal)) - i-- - dAtA[i] = 0x18 - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CloseIORequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CloseIORequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CloseIORequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Stdin { - i-- - if m.Stdin { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PidsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PidsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PidsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PidsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PidsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PidsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Processes) > 0 { - for iNdEx := len(m.Processes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Processes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintShim(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *CheckpointTaskRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CheckpointTaskRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CheckpointTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Options != nil { - { - size, err := m.Options.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintShim(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if len(m.Path) > 0 { - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintShim(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *UpdateTaskRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UpdateTaskRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UpdateTaskRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Annotations) > 0 { - for k := range m.Annotations { - v := m.Annotations[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintShim(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintShim(dAtA, i, uint64(len(k))) - i-- - dAtA[i] 
= 0xa - i = encodeVarintShim(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1a - } - } - if m.Resources != nil { - { - size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintShim(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *StartRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StartRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StartRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *StartResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StartResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StartResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Pid != 0 { - i = encodeVarintShim(dAtA, i, uint64(m.Pid)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *WaitRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WaitRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WaitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ExecID) > 0 { - i -= len(m.ExecID) - copy(dAtA[i:], m.ExecID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ExecID))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *WaitResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WaitResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WaitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], 
m.XXX_unrecognized) - } - n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt):]) - if err7 != nil { - return 0, err7 - } - i -= n7 - i = encodeVarintShim(dAtA, i, uint64(n7)) - i-- - dAtA[i] = 0x12 - if m.ExitStatus != 0 { - i = encodeVarintShim(dAtA, i, uint64(m.ExitStatus)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *StatsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *StatsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Stats != nil { - { - size, err := m.Stats.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintShim(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ConnectRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ConnectRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ConnectRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ConnectResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ConnectResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ConnectResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintShim(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x1a - } - if m.TaskPid != 0 { - i = encodeVarintShim(dAtA, i, 
uint64(m.TaskPid)) - i-- - dAtA[i] = 0x10 - } - if m.ShimPid != 0 { - i = encodeVarintShim(dAtA, i, uint64(m.ShimPid)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ShutdownRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ShutdownRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ShutdownRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Now { - i-- - if m.Now { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PauseRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PauseRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PauseRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ResumeRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResumeRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResumeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintShim(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintShim(dAtA []byte, offset int, v uint64) int { - offset -= sovShim(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *CreateTaskRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.Bundle) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if len(m.Rootfs) > 0 { - for _, e := range m.Rootfs { - l = e.Size() - n += 1 + l + sovShim(uint64(l)) - } - } - if m.Terminal { - n += 2 - } - l = len(m.Stdin) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.Stdout) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.Stderr) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.Checkpoint) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.ParentCheckpoint) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.Options != nil { - l = 
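// Reviewer note: the deleted encodeVarintShim above (together with the sovShim
// size helper it calls) writes protobuf base-128 varints back-to-front into a
// pre-sized buffer. The sketch below reproduces that behaviour under
// illustrative names — varintLen and encodeVarintBackwards are not from this
// file — and checks the result against the standard library's forward encoder.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"math/bits"
)

// varintLen stands in for the generated sovShim helper: the number of bytes
// needed to encode v as a base-128 varint.
func varintLen(v uint64) int { return (bits.Len64(v|1) + 6) / 7 }

// encodeVarintBackwards mirrors the deleted encodeVarintShim: the varint's last
// byte lands just before offset, and the index of its first byte is returned.
func encodeVarintBackwards(dst []byte, offset int, v uint64) int {
	offset -= varintLen(v)
	base := offset
	for v >= 1<<7 {
		dst[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dst[offset] = uint8(v)
	return base
}

func main() {
	const v = 300 // encodes as 0xac, 0x02
	buf := make([]byte, 10)
	start := encodeVarintBackwards(buf, len(buf), v)

	fwd := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(fwd, v)

	fmt.Println(bytes.Equal(buf[start:], fwd[:n])) // true
}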
m.Options.Size() - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CreateTaskResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Pid != 0 { - n += 1 + sovShim(uint64(m.Pid)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DeleteRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DeleteResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Pid != 0 { - n += 1 + sovShim(uint64(m.Pid)) - } - if m.ExitStatus != 0 { - n += 1 + sovShim(uint64(m.ExitStatus)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt) - n += 1 + l + sovShim(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ExecProcessRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.Terminal { - n += 2 - } - l = len(m.Stdin) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.Stdout) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.Stderr) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.Spec != nil { - l = m.Spec.Size() - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ExecProcessResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ResizePtyRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.Width != 0 { - n += 1 + sovShim(uint64(m.Width)) - } - if m.Height != 0 { - n += 1 + sovShim(uint64(m.Height)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StateRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StateResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.Bundle) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.Pid != 0 { - n += 1 + sovShim(uint64(m.Pid)) - } - if m.Status != 0 { - n += 1 + sovShim(uint64(m.Status)) - } - l = len(m.Stdin) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.Stdout) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.Stderr) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.Terminal { - n += 2 - } - if m.ExitStatus != 0 { - n += 1 + sovShim(uint64(m.ExitStatus)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt) - n += 1 + l + sovShim(uint64(l)) - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += 
len(m.XXX_unrecognized) - } - return n -} - -func (m *KillRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.Signal != 0 { - n += 1 + sovShim(uint64(m.Signal)) - } - if m.All { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CloseIORequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.Stdin { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PidsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PidsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Processes) > 0 { - for _, e := range m.Processes { - l = e.Size() - n += 1 + l + sovShim(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CheckpointTaskRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.Path) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.Options != nil { - l = m.Options.Size() - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UpdateTaskRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.Resources != nil { - l = m.Resources.Size() - n += 1 + l + sovShim(uint64(l)) - } - if len(m.Annotations) > 0 { - for k, v := range m.Annotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovShim(uint64(len(k))) + 1 + len(v) + sovShim(uint64(len(v))) - n += mapEntrySize + 1 + sovShim(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StartRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StartResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Pid != 0 { - n += 1 + sovShim(uint64(m.Pid)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *WaitRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - l = len(m.ExecID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *WaitResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ExitStatus != 0 { - n += 1 + sovShim(uint64(m.ExitStatus)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt) - n += 1 + l + sovShim(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StatsRequest) Size() (n int) { - 
if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StatsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Stats != nil { - l = m.Stats.Size() - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ConnectRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ConnectResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ShimPid != 0 { - n += 1 + sovShim(uint64(m.ShimPid)) - } - if m.TaskPid != 0 { - n += 1 + sovShim(uint64(m.TaskPid)) - } - l = len(m.Version) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ShutdownRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.Now { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PauseRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ResumeRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovShim(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovShim(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozShim(x uint64) (n int) { - return sovShim(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *CreateTaskRequest) String() string { - if this == nil { - return "nil" - } - repeatedStringForRootfs := "[]*Mount{" - for _, f := range this.Rootfs { - repeatedStringForRootfs += strings.Replace(fmt.Sprintf("%v", f), "Mount", "types.Mount", 1) + "," - } - repeatedStringForRootfs += "}" - s := strings.Join([]string{`&CreateTaskRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Bundle:` + fmt.Sprintf("%v", this.Bundle) + `,`, - `Rootfs:` + repeatedStringForRootfs + `,`, - `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, - `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, - `Checkpoint:` + fmt.Sprintf("%v", this.Checkpoint) + `,`, - `ParentCheckpoint:` + fmt.Sprintf("%v", this.ParentCheckpoint) + `,`, - `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "types1.Any", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CreateTaskResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CreateTaskResponse{`, - `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *DeleteRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeleteRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `ExecID:` + fmt.Sprintf("%v", 
this.ExecID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *DeleteResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeleteResponse{`, - `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, - `ExitedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExitedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ExecProcessRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExecProcessRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, - `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, - `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "Any", "types1.Any", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ExecProcessResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExecProcessResponse{`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ResizePtyRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResizePtyRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `Width:` + fmt.Sprintf("%v", this.Width) + `,`, - `Height:` + fmt.Sprintf("%v", this.Height) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *StateRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StateRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *StateResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StateResponse{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Bundle:` + fmt.Sprintf("%v", this.Bundle) + `,`, - `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, - `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, - `Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`, - `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, - `ExitedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExitedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *KillRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&KillRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `Signal:` + fmt.Sprintf("%v", this.Signal) + `,`, - `All:` + fmt.Sprintf("%v", this.All) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - 
return s -} -func (this *CloseIORequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CloseIORequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *PidsRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PidsRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *PidsResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForProcesses := "[]*ProcessInfo{" - for _, f := range this.Processes { - repeatedStringForProcesses += strings.Replace(fmt.Sprintf("%v", f), "ProcessInfo", "task.ProcessInfo", 1) + "," - } - repeatedStringForProcesses += "}" - s := strings.Join([]string{`&PidsResponse{`, - `Processes:` + repeatedStringForProcesses + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CheckpointTaskRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CheckpointTaskRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "types1.Any", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UpdateTaskRequest) String() string { - if this == nil { - return "nil" - } - keysForAnnotations := make([]string, 0, len(this.Annotations)) - for k, _ := range this.Annotations { - keysForAnnotations = append(keysForAnnotations, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) - mapStringForAnnotations := "map[string]string{" - for _, k := range keysForAnnotations { - mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) - } - mapStringForAnnotations += "}" - s := strings.Join([]string{`&UpdateTaskRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "Any", "types1.Any", 1) + `,`, - `Annotations:` + mapStringForAnnotations + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *StartRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StartRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *StartResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StartResponse{`, - `Pid:` + fmt.Sprintf("%v", this.Pid) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *WaitRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WaitRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *WaitResponse) String() string { - if this == nil { - return "nil" - } - s := 
strings.Join([]string{`&WaitResponse{`, - `ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`, - `ExitedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExitedAt), "Timestamp", "types1.Timestamp", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *StatsRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatsRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *StatsResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatsResponse{`, - `Stats:` + strings.Replace(fmt.Sprintf("%v", this.Stats), "Any", "types1.Any", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ConnectRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ConnectRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ConnectResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ConnectResponse{`, - `ShimPid:` + fmt.Sprintf("%v", this.ShimPid) + `,`, - `TaskPid:` + fmt.Sprintf("%v", this.TaskPid) + `,`, - `Version:` + fmt.Sprintf("%v", this.Version) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ShutdownRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ShutdownRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Now:` + fmt.Sprintf("%v", this.Now) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *PauseRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PauseRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ResumeRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResumeRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringShim(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} - -type TaskService interface { - State(ctx context.Context, req *StateRequest) (*StateResponse, error) - Create(ctx context.Context, req *CreateTaskRequest) (*CreateTaskResponse, error) - Start(ctx context.Context, req *StartRequest) (*StartResponse, error) - Delete(ctx context.Context, req *DeleteRequest) (*DeleteResponse, error) - Pids(ctx context.Context, req *PidsRequest) (*PidsResponse, error) - Pause(ctx context.Context, req *PauseRequest) (*types1.Empty, error) - Resume(ctx context.Context, req *ResumeRequest) (*types1.Empty, error) - Checkpoint(ctx context.Context, req *CheckpointTaskRequest) (*types1.Empty, error) - Kill(ctx context.Context, req *KillRequest) (*types1.Empty, error) - Exec(ctx context.Context, req *ExecProcessRequest) (*types1.Empty, error) - ResizePty(ctx context.Context, req *ResizePtyRequest) (*types1.Empty, error) - 
CloseIO(ctx context.Context, req *CloseIORequest) (*types1.Empty, error) - Update(ctx context.Context, req *UpdateTaskRequest) (*types1.Empty, error) - Wait(ctx context.Context, req *WaitRequest) (*WaitResponse, error) - Stats(ctx context.Context, req *StatsRequest) (*StatsResponse, error) - Connect(ctx context.Context, req *ConnectRequest) (*ConnectResponse, error) - Shutdown(ctx context.Context, req *ShutdownRequest) (*types1.Empty, error) -} - -func RegisterTaskService(srv *github_com_containerd_ttrpc.Server, svc TaskService) { - srv.Register("containerd.task.v2.Task", map[string]github_com_containerd_ttrpc.Method{ - "State": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req StateRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.State(ctx, &req) - }, - "Create": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req CreateTaskRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Create(ctx, &req) - }, - "Start": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req StartRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Start(ctx, &req) - }, - "Delete": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req DeleteRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Delete(ctx, &req) - }, - "Pids": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req PidsRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Pids(ctx, &req) - }, - "Pause": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req PauseRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Pause(ctx, &req) - }, - "Resume": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req ResumeRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Resume(ctx, &req) - }, - "Checkpoint": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req CheckpointTaskRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Checkpoint(ctx, &req) - }, - "Kill": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req KillRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Kill(ctx, &req) - }, - "Exec": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req ExecProcessRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Exec(ctx, &req) - }, - "ResizePty": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req ResizePtyRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.ResizePty(ctx, &req) - }, - "CloseIO": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req CloseIORequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.CloseIO(ctx, &req) - }, - "Update": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req UpdateTaskRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Update(ctx, &req) - }, - "Wait": func(ctx context.Context, unmarshal 
func(interface{}) error) (interface{}, error) { - var req WaitRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Wait(ctx, &req) - }, - "Stats": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req StatsRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Stats(ctx, &req) - }, - "Connect": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req ConnectRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Connect(ctx, &req) - }, - "Shutdown": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) { - var req ShutdownRequest - if err := unmarshal(&req); err != nil { - return nil, err - } - return svc.Shutdown(ctx, &req) - }, - }) -} - -type taskClient struct { - client *github_com_containerd_ttrpc.Client -} - -func NewTaskClient(client *github_com_containerd_ttrpc.Client) TaskService { - return &taskClient{ - client: client, - } -} - -func (c *taskClient) State(ctx context.Context, req *StateRequest) (*StateResponse, error) { - var resp StateResponse - if err := c.client.Call(ctx, "containerd.task.v2.Task", "State", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) Create(ctx context.Context, req *CreateTaskRequest) (*CreateTaskResponse, error) { - var resp CreateTaskResponse - if err := c.client.Call(ctx, "containerd.task.v2.Task", "Create", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) Start(ctx context.Context, req *StartRequest) (*StartResponse, error) { - var resp StartResponse - if err := c.client.Call(ctx, "containerd.task.v2.Task", "Start", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) Delete(ctx context.Context, req *DeleteRequest) (*DeleteResponse, error) { - var resp DeleteResponse - if err := c.client.Call(ctx, "containerd.task.v2.Task", "Delete", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) Pids(ctx context.Context, req *PidsRequest) (*PidsResponse, error) { - var resp PidsResponse - if err := c.client.Call(ctx, "containerd.task.v2.Task", "Pids", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) Pause(ctx context.Context, req *PauseRequest) (*types1.Empty, error) { - var resp types1.Empty - if err := c.client.Call(ctx, "containerd.task.v2.Task", "Pause", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) Resume(ctx context.Context, req *ResumeRequest) (*types1.Empty, error) { - var resp types1.Empty - if err := c.client.Call(ctx, "containerd.task.v2.Task", "Resume", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) Checkpoint(ctx context.Context, req *CheckpointTaskRequest) (*types1.Empty, error) { - var resp types1.Empty - if err := c.client.Call(ctx, "containerd.task.v2.Task", "Checkpoint", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) Kill(ctx context.Context, req *KillRequest) (*types1.Empty, error) { - var resp types1.Empty - if err := c.client.Call(ctx, "containerd.task.v2.Task", "Kill", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) Exec(ctx context.Context, req *ExecProcessRequest) (*types1.Empty, error) { - var resp types1.Empty - if err := 
c.client.Call(ctx, "containerd.task.v2.Task", "Exec", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) ResizePty(ctx context.Context, req *ResizePtyRequest) (*types1.Empty, error) { - var resp types1.Empty - if err := c.client.Call(ctx, "containerd.task.v2.Task", "ResizePty", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) CloseIO(ctx context.Context, req *CloseIORequest) (*types1.Empty, error) { - var resp types1.Empty - if err := c.client.Call(ctx, "containerd.task.v2.Task", "CloseIO", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) Update(ctx context.Context, req *UpdateTaskRequest) (*types1.Empty, error) { - var resp types1.Empty - if err := c.client.Call(ctx, "containerd.task.v2.Task", "Update", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) Wait(ctx context.Context, req *WaitRequest) (*WaitResponse, error) { - var resp WaitResponse - if err := c.client.Call(ctx, "containerd.task.v2.Task", "Wait", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) Stats(ctx context.Context, req *StatsRequest) (*StatsResponse, error) { - var resp StatsResponse - if err := c.client.Call(ctx, "containerd.task.v2.Task", "Stats", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) Connect(ctx context.Context, req *ConnectRequest) (*ConnectResponse, error) { - var resp ConnectResponse - if err := c.client.Call(ctx, "containerd.task.v2.Task", "Connect", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} - -func (c *taskClient) Shutdown(ctx context.Context, req *ShutdownRequest) (*types1.Empty, error) { - var resp types1.Empty - if err := c.client.Call(ctx, "containerd.task.v2.Task", "Shutdown", req, &resp); err != nil { - return nil, err - } - return &resp, nil -} -func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CreateTaskRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CreateTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Bundle", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - 
if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Bundle = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rootfs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rootfs = append(m.Rootfs, &types.Mount{}) - if err := m.Rootfs[len(m.Rootfs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Terminal = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdin = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdout = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stderr = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Checkpoint", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Checkpoint = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ParentCheckpoint", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ParentCheckpoint = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Options == nil { - m.Options = &types1.Any{} - } - if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CreateTaskResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CreateTaskResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CreateTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) - } - m.Pid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) - } - m.Pid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) - } - m.ExitStatus = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExitStatus |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExecProcessRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExecProcessRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExecProcessRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Terminal = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdin = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break 
- } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdout = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stderr = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Spec == nil { - m.Spec = &types1.Any{} - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExecProcessResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExecProcessResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExecProcessResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResizePtyRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResizePtyRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResizePtyRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Width", wireType) - } - m.Width = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Width |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) - } - m.Height = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Height |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StateRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StateRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StateRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StateResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StateResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StateResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Bundle", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Bundle = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) - } - m.Pid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - m.Status = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Status |= task.Status(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdin = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stdout = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stderr = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Terminal = bool(v != 0) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) - } - m.ExitStatus = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExitStatus |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - 
return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *KillRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: KillRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: KillRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType) - } - m.Signal = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Signal |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field All", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.All = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CloseIORequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CloseIORequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CloseIORequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Stdin = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PidsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PidsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PidsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PidsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PidsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PidsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Processes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Processes = append(m.Processes, &task.ProcessInfo{}) - if err := m.Processes[len(m.Processes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = 
append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CheckpointTaskRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CheckpointTaskRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CheckpointTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Options == nil { - m.Options = &types1.Any{} - } - if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UpdateTaskRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UpdateTaskRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Resources == nil { - m.Resources = &types1.Any{} - } - if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Annotations == nil { - m.Annotations = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey 
< 0 { - return ErrInvalidLengthShim - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthShim - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthShim - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthShim - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Annotations[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StartRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StartRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StartRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - 
return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StartResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StartResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StartResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType) - } - m.Pid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Pid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WaitRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WaitRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WaitRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExecID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WaitResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WaitResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WaitResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType) - } - m.ExitStatus = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExitStatus |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stats", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Stats == nil { - m.Stats = &types1.Any{} - } - if err := m.Stats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, 
dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConnectRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConnectRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConnectRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConnectResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConnectResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConnectResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ShimPid", wireType) - } - m.ShimPid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ShimPid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TaskPid", wireType) - } - m.TaskPid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TaskPid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ShutdownRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ShutdownRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ShutdownRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Now", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Now = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PauseRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PauseRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PauseRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResumeRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResumeRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResumeRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowShim - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthShim - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthShim - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipShim(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthShim - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipShim(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowShim - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowShim - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowShim - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthShim - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupShim - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthShim - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthShim = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowShim = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupShim = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/containerd/containerd/sandbox.go b/vendor/github.com/containerd/containerd/sandbox.go new file mode 100644 index 0000000000000..2e46da9f32ca5 --- /dev/null +++ b/vendor/github.com/containerd/containerd/sandbox.go @@ -0,0 +1,247 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package containerd + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/oci" + "github.com/containerd/containerd/protobuf/types" + api "github.com/containerd/containerd/sandbox" + "github.com/containerd/typeurl/v2" +) + +// Sandbox is a high level client to containerd's sandboxes. +type Sandbox interface { + // ID is a sandbox identifier + ID() string + // PID returns sandbox's process PID or error if its not yet started. + PID() (uint32, error) + // NewContainer creates new container that will belong to this sandbox + NewContainer(ctx context.Context, id string, opts ...NewContainerOpts) (Container, error) + // Labels returns the labels set on the sandbox + Labels(ctx context.Context) (map[string]string, error) + // Start starts new sandbox instance + Start(ctx context.Context) error + // Stop sends stop request to the shim instance. + Stop(ctx context.Context) error + // Wait blocks until sandbox process exits. 
+ Wait(ctx context.Context) (<-chan ExitStatus, error) + // Shutdown removes sandbox from the metadata store and shutdowns shim instance. + Shutdown(ctx context.Context) error +} + +type sandboxClient struct { + pid *uint32 + client *Client + metadata api.Sandbox +} + +func (s *sandboxClient) ID() string { + return s.metadata.ID +} + +func (s *sandboxClient) PID() (uint32, error) { + if s.pid == nil { + return 0, fmt.Errorf("sandbox not started") + } + + return *s.pid, nil +} + +func (s *sandboxClient) NewContainer(ctx context.Context, id string, opts ...NewContainerOpts) (Container, error) { + return s.client.NewContainer(ctx, id, append(opts, WithSandbox(s.ID()))...) +} + +func (s *sandboxClient) Labels(ctx context.Context) (map[string]string, error) { + sandbox, err := s.client.SandboxStore().Get(ctx, s.ID()) + if err != nil { + return nil, err + } + + return sandbox.Labels, nil +} + +func (s *sandboxClient) Start(ctx context.Context) error { + resp, err := s.client.SandboxController().Start(ctx, s.ID()) + if err != nil { + return err + } + + s.pid = &resp.Pid + return nil +} + +func (s *sandboxClient) Wait(ctx context.Context) (<-chan ExitStatus, error) { + c := make(chan ExitStatus, 1) + go func() { + defer close(c) + + exitStatus, err := s.client.SandboxController().Wait(ctx, s.ID()) + if err != nil { + c <- ExitStatus{ + code: UnknownExitStatus, + err: err, + } + return + } + + c <- ExitStatus{ + code: exitStatus.ExitStatus, + exitedAt: exitStatus.ExitedAt, + } + }() + + return c, nil +} + +func (s *sandboxClient) Stop(ctx context.Context) error { + return s.client.SandboxController().Stop(ctx, s.ID()) +} + +func (s *sandboxClient) Shutdown(ctx context.Context) error { + if err := s.client.SandboxController().Shutdown(ctx, s.ID()); err != nil { + return fmt.Errorf("failed to shutdown sandbox: %w", err) + } + + if err := s.client.SandboxStore().Delete(ctx, s.ID()); err != nil { + return fmt.Errorf("failed to delete sandbox from store: %w", err) + } + + return nil +} + +// NewSandbox creates new sandbox client +func (c *Client) NewSandbox(ctx context.Context, sandboxID string, opts ...NewSandboxOpts) (Sandbox, error) { + if sandboxID == "" { + return nil, errors.New("sandbox ID must be specified") + } + + newSandbox := api.Sandbox{ + ID: sandboxID, + CreatedAt: time.Now().UTC(), + UpdatedAt: time.Now().UTC(), + } + + for _, opt := range opts { + if err := opt(ctx, c, &newSandbox); err != nil { + return nil, err + } + } + + metadata, err := c.SandboxStore().Create(ctx, newSandbox) + if err != nil { + return nil, err + } + + return &sandboxClient{ + pid: nil, // Not yet started + client: c, + metadata: metadata, + }, nil +} + +// LoadSandbox laods existing sandbox metadata object using the id +func (c *Client) LoadSandbox(ctx context.Context, id string) (Sandbox, error) { + sandbox, err := c.SandboxStore().Get(ctx, id) + if err != nil { + return nil, err + } + + status, err := c.SandboxController().Status(ctx, id, false) + if err != nil { + return nil, fmt.Errorf("failed to load sandbox %s, status request failed: %w", id, err) + } + + return &sandboxClient{ + pid: &status.Pid, + client: c, + metadata: sandbox, + }, nil +} + +// NewSandboxOpts is a sandbox options and extensions to be provided by client +type NewSandboxOpts func(ctx context.Context, client *Client, sandbox *api.Sandbox) error + +// WithSandboxRuntime allows a user to specify the runtime to be used to run a sandbox +func WithSandboxRuntime(name string, options interface{}) NewSandboxOpts { + return func(ctx 
context.Context, client *Client, s *api.Sandbox) error { + if options == nil { + options = &types.Empty{} + } + + opts, err := typeurl.MarshalAny(options) + if err != nil { + return fmt.Errorf("failed to marshal sandbox runtime options: %w", err) + } + + s.Runtime = api.RuntimeOpts{ + Name: name, + Options: opts, + } + + return nil + } +} + +// WithSandboxSpec will provide the sandbox runtime spec +func WithSandboxSpec(s *oci.Spec, opts ...oci.SpecOpts) NewSandboxOpts { + return func(ctx context.Context, client *Client, sandbox *api.Sandbox) error { + c := &containers.Container{ID: sandbox.ID} + + if err := oci.ApplyOpts(ctx, client, c, s, opts...); err != nil { + return err + } + + spec, err := typeurl.MarshalAny(s) + if err != nil { + return fmt.Errorf("failed to marshal spec: %w", err) + } + + sandbox.Spec = spec + return nil + } +} + +// WithSandboxExtension attaches an extension to sandbox +func WithSandboxExtension(name string, ext interface{}) NewSandboxOpts { + return func(ctx context.Context, client *Client, s *api.Sandbox) error { + if s.Extensions == nil { + s.Extensions = make(map[string]typeurl.Any) + } + + any, err := typeurl.MarshalAny(ext) + if err != nil { + return fmt.Errorf("failed to marshal sandbox extension: %w", err) + } + + s.Extensions[name] = any + return err + } +} + +// WithSandboxLabels attaches map of labels to sandbox +func WithSandboxLabels(labels map[string]string) NewSandboxOpts { + return func(ctx context.Context, client *Client, sandbox *api.Sandbox) error { + sandbox.Labels = labels + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/sandbox/bridge.go b/vendor/github.com/containerd/containerd/sandbox/bridge.go new file mode 100644 index 0000000000000..bc7d999ce5d58 --- /dev/null +++ b/vendor/github.com/containerd/containerd/sandbox/bridge.go @@ -0,0 +1,77 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sandbox + +import ( + "context" + "fmt" + + "github.com/containerd/ttrpc" + "google.golang.org/grpc" + + api "github.com/containerd/containerd/api/runtime/sandbox/v1" +) + +// NewClient returns a new sandbox client that handles both GRPC and TTRPC clients. 
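The vendored sandbox.go above introduces a client-side Sandbox handle (NewSandbox, Start, Wait, Stop, Shutdown) assembled from functional options. A minimal usage sketch, assuming an already-connected *containerd.Client with sandbox support and a runc v2 shim; the sandbox ID, labels, and runtime name below are illustrative and not taken from this diff:

```go
// Illustrative sketch only (not part of the vendored diff): driving the new
// Sandbox client API end to end. Assumes a reachable containerd daemon with
// sandbox support and the runc v2 shim; IDs and labels are made up.
package main

import (
	"context"
	"log"

	containerd "github.com/containerd/containerd"
)

func runSandbox(ctx context.Context, client *containerd.Client) error {
	// Register the sandbox metadata in the store; options are applied in order.
	sb, err := client.NewSandbox(ctx, "example-sandbox",
		containerd.WithSandboxRuntime("io.containerd.runc.v2", nil),
		containerd.WithSandboxLabels(map[string]string{"app": "demo"}),
	)
	if err != nil {
		return err
	}

	// Start the shim-backed instance, then subscribe to its exit status.
	if err := sb.Start(ctx); err != nil {
		return err
	}
	exitCh, err := sb.Wait(ctx)
	if err != nil {
		return err
	}

	// Containers created via sb.NewContainer(...) would belong to this sandbox.

	if err := sb.Stop(ctx); err != nil {
		log.Printf("stop sandbox: %v", err)
	}
	<-exitCh // wait for the shim to report the exit
	return sb.Shutdown(ctx)
}
```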
+func NewClient(client interface{}) (api.TTRPCSandboxService, error) { + switch c := client.(type) { + case *ttrpc.Client: + return api.NewTTRPCSandboxClient(c), nil + case grpc.ClientConnInterface: + return &grpcBridge{api.NewSandboxClient(c)}, nil + default: + return nil, fmt.Errorf("unsupported client type %T", client) + } +} + +type grpcBridge struct { + client api.SandboxClient +} + +var _ api.TTRPCSandboxService = (*grpcBridge)(nil) + +func (g *grpcBridge) CreateSandbox(ctx context.Context, request *api.CreateSandboxRequest) (*api.CreateSandboxResponse, error) { + return g.client.CreateSandbox(ctx, request) +} + +func (g *grpcBridge) StartSandbox(ctx context.Context, request *api.StartSandboxRequest) (*api.StartSandboxResponse, error) { + return g.client.StartSandbox(ctx, request) +} + +func (g *grpcBridge) Platform(ctx context.Context, request *api.PlatformRequest) (*api.PlatformResponse, error) { + return g.client.Platform(ctx, request) +} + +func (g *grpcBridge) StopSandbox(ctx context.Context, request *api.StopSandboxRequest) (*api.StopSandboxResponse, error) { + return g.client.StopSandbox(ctx, request) +} + +func (g *grpcBridge) WaitSandbox(ctx context.Context, request *api.WaitSandboxRequest) (*api.WaitSandboxResponse, error) { + return g.client.WaitSandbox(ctx, request) +} + +func (g *grpcBridge) SandboxStatus(ctx context.Context, request *api.SandboxStatusRequest) (*api.SandboxStatusResponse, error) { + return g.client.SandboxStatus(ctx, request) +} + +func (g *grpcBridge) PingSandbox(ctx context.Context, request *api.PingRequest) (*api.PingResponse, error) { + return g.client.PingSandbox(ctx, request) +} + +func (g *grpcBridge) ShutdownSandbox(ctx context.Context, request *api.ShutdownSandboxRequest) (*api.ShutdownSandboxResponse, error) { + return g.client.ShutdownSandbox(ctx, request) +} diff --git a/vendor/github.com/containerd/containerd/sandbox/controller.go b/vendor/github.com/containerd/containerd/sandbox/controller.go new file mode 100644 index 0000000000000..43c4a0bed0e2c --- /dev/null +++ b/vendor/github.com/containerd/containerd/sandbox/controller.go @@ -0,0 +1,116 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sandbox + +import ( + "context" + "fmt" + "time" + + "github.com/containerd/containerd/api/types" + "github.com/containerd/containerd/platforms" + "github.com/containerd/typeurl/v2" +) + +type CreateOptions struct { + Rootfs []*types.Mount + // Options are used to pass arbitrary options to the shim when creating a new sandbox. + // CRI will use this to pass PodSandboxConfig. + // Don't confuse this with Runtime options, which are passed at shim instance start + // to setup global shim configuration. 
+ Options typeurl.Any +} + +type CreateOpt func(*CreateOptions) error + +// WithRootFS is used to create a sandbox with the provided rootfs mount +// TODO: Switch to mount.Mount once target added +func WithRootFS(m []*types.Mount) CreateOpt { + return func(co *CreateOptions) error { + co.Rootfs = m + return nil + } +} + +// WithOptions allows passing arbitrary options when creating a new sandbox. +func WithOptions(options any) CreateOpt { + return func(co *CreateOptions) error { + var err error + co.Options, err = typeurl.MarshalAny(options) + if err != nil { + return fmt.Errorf("failed to marshal sandbox options: %w", err) + } + + return nil + } +} + +type StopOptions struct { + Timeout *time.Duration +} + +type StopOpt func(*StopOptions) + +func WithTimeout(timeout time.Duration) StopOpt { + return func(so *StopOptions) { + so.Timeout = &timeout + } +} + +// Controller is an interface to manage sandboxes at runtime. +// When running in sandbox mode, shim expected to implement `SandboxService`. +// Shim lifetimes are now managed manually via sandbox API by the containerd's client. +type Controller interface { + // Create is used to initialize sandbox environment. (mounts, any) + Create(ctx context.Context, sandboxID string, opts ...CreateOpt) error + // Start will start previously created sandbox. + Start(ctx context.Context, sandboxID string) (ControllerInstance, error) + // Platform returns target sandbox OS that will be used by Controller. + // containerd will rely on this to generate proper OCI spec. + Platform(_ctx context.Context, _sandboxID string) (platforms.Platform, error) + // Stop will stop sandbox instance + Stop(ctx context.Context, sandboxID string, opts ...StopOpt) error + // Wait blocks until sandbox process exits. + Wait(ctx context.Context, sandboxID string) (ExitStatus, error) + // Status will query sandbox process status. It is heavier than Ping call and must be used whenever you need to + // gather metadata about current sandbox state (status, uptime, resource use, etc). + Status(ctx context.Context, sandboxID string, verbose bool) (ControllerStatus, error) + // Shutdown deletes and cleans all tasks and sandbox instance. + Shutdown(ctx context.Context, sandboxID string) error +} + +type ControllerInstance struct { + SandboxID string + Pid uint32 + CreatedAt time.Time + Labels map[string]string +} + +type ExitStatus struct { + ExitStatus uint32 + ExitedAt time.Time +} + +type ControllerStatus struct { + SandboxID string + Pid uint32 + State string + Info map[string]string + CreatedAt time.Time + ExitedAt time.Time + Extra typeurl.Any +} diff --git a/vendor/github.com/containerd/containerd/sandbox/helpers.go b/vendor/github.com/containerd/containerd/sandbox/helpers.go new file mode 100644 index 0000000000000..bfe0b23d33df0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/sandbox/helpers.go @@ -0,0 +1,68 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package sandbox + +import ( + "github.com/containerd/containerd/api/types" + "github.com/containerd/containerd/protobuf" + gogo_types "github.com/containerd/containerd/protobuf/types" + "github.com/containerd/typeurl/v2" +) + +// ToProto will map Sandbox struct to it's protobuf definition +func ToProto(sandbox *Sandbox) *types.Sandbox { + extensions := make(map[string]*gogo_types.Any) + for k, v := range sandbox.Extensions { + extensions[k] = protobuf.FromAny(v) + } + return &types.Sandbox{ + SandboxID: sandbox.ID, + Runtime: &types.Sandbox_Runtime{ + Name: sandbox.Runtime.Name, + Options: protobuf.FromAny(sandbox.Runtime.Options), + }, + Labels: sandbox.Labels, + CreatedAt: protobuf.ToTimestamp(sandbox.CreatedAt), + UpdatedAt: protobuf.ToTimestamp(sandbox.UpdatedAt), + Extensions: extensions, + Spec: protobuf.FromAny(sandbox.Spec), + } +} + +// FromProto map protobuf sandbox definition to Sandbox struct +func FromProto(sandboxpb *types.Sandbox) Sandbox { + runtime := RuntimeOpts{ + Name: sandboxpb.Runtime.Name, + Options: sandboxpb.Runtime.Options, + } + + extensions := make(map[string]typeurl.Any) + for k, v := range sandboxpb.Extensions { + v := v + extensions[k] = v + } + + return Sandbox{ + ID: sandboxpb.SandboxID, + Labels: sandboxpb.Labels, + Runtime: runtime, + Spec: sandboxpb.Spec, + CreatedAt: protobuf.FromTimestamp(sandboxpb.CreatedAt), + UpdatedAt: protobuf.FromTimestamp(sandboxpb.UpdatedAt), + Extensions: extensions, + } +} diff --git a/vendor/github.com/containerd/containerd/sandbox/proxy/controller.go b/vendor/github.com/containerd/containerd/sandbox/proxy/controller.go new file mode 100644 index 0000000000000..4c0a51d2e7fd7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/sandbox/proxy/controller.go @@ -0,0 +1,141 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package proxy + +import ( + "context" + + api "github.com/containerd/containerd/api/services/sandbox/v1" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/sandbox" + "google.golang.org/protobuf/types/known/anypb" +) + +// remoteSandboxController is a low level GRPC client for containerd's sandbox controller service +type remoteSandboxController struct { + client api.ControllerClient +} + +var _ sandbox.Controller = (*remoteSandboxController)(nil) + +// NewSandboxController creates a client for a sandbox controller +func NewSandboxController(client api.ControllerClient) sandbox.Controller { + return &remoteSandboxController{client: client} +} + +func (s *remoteSandboxController) Create(ctx context.Context, sandboxID string, opts ...sandbox.CreateOpt) error { + var options sandbox.CreateOptions + for _, opt := range opts { + opt(&options) + } + _, err := s.client.Create(ctx, &api.ControllerCreateRequest{ + SandboxID: sandboxID, + Rootfs: options.Rootfs, + Options: &anypb.Any{ + TypeUrl: options.Options.GetTypeUrl(), + Value: options.Options.GetValue(), + }, + }) + if err != nil { + return errdefs.FromGRPC(err) + } + + return nil +} + +func (s *remoteSandboxController) Start(ctx context.Context, sandboxID string) (sandbox.ControllerInstance, error) { + resp, err := s.client.Start(ctx, &api.ControllerStartRequest{SandboxID: sandboxID}) + if err != nil { + return sandbox.ControllerInstance{}, errdefs.FromGRPC(err) + } + + return sandbox.ControllerInstance{ + SandboxID: sandboxID, + Pid: resp.GetPid(), + CreatedAt: resp.GetCreatedAt().AsTime(), + Labels: resp.GetLabels(), + }, nil +} + +func (s *remoteSandboxController) Platform(ctx context.Context, sandboxID string) (platforms.Platform, error) { + resp, err := s.client.Platform(ctx, &api.ControllerPlatformRequest{SandboxID: sandboxID}) + if err != nil { + return platforms.Platform{}, errdefs.FromGRPC(err) + } + + platform := resp.GetPlatform() + return platforms.Platform{ + Architecture: platform.GetArchitecture(), + OS: platform.GetOS(), + Variant: platform.GetVariant(), + }, nil +} + +func (s *remoteSandboxController) Stop(ctx context.Context, sandboxID string, opts ...sandbox.StopOpt) error { + var soptions sandbox.StopOptions + for _, opt := range opts { + opt(&soptions) + } + req := &api.ControllerStopRequest{SandboxID: sandboxID} + if soptions.Timeout != nil { + req.TimeoutSecs = uint32(soptions.Timeout.Seconds()) + } + _, err := s.client.Stop(ctx, req) + if err != nil { + return errdefs.FromGRPC(err) + } + + return nil +} + +func (s *remoteSandboxController) Shutdown(ctx context.Context, sandboxID string) error { + _, err := s.client.Shutdown(ctx, &api.ControllerShutdownRequest{SandboxID: sandboxID}) + if err != nil { + return errdefs.FromGRPC(err) + } + + return nil +} + +func (s *remoteSandboxController) Wait(ctx context.Context, sandboxID string) (sandbox.ExitStatus, error) { + resp, err := s.client.Wait(ctx, &api.ControllerWaitRequest{SandboxID: sandboxID}) + if err != nil { + return sandbox.ExitStatus{}, errdefs.FromGRPC(err) + } + + return sandbox.ExitStatus{ + ExitStatus: resp.GetExitStatus(), + ExitedAt: resp.GetExitedAt().AsTime(), + }, nil +} + +func (s *remoteSandboxController) Status(ctx context.Context, sandboxID string, verbose bool) (sandbox.ControllerStatus, error) { + resp, err := s.client.Status(ctx, &api.ControllerStatusRequest{SandboxID: sandboxID, Verbose: verbose}) + if err != nil { + return sandbox.ControllerStatus{}, 
errdefs.FromGRPC(err) + } + return sandbox.ControllerStatus{ + SandboxID: sandboxID, + Pid: resp.GetPid(), + State: resp.GetState(), + Info: resp.GetInfo(), + CreatedAt: resp.GetCreatedAt().AsTime(), + ExitedAt: resp.GetExitedAt().AsTime(), + Extra: resp.GetExtra(), + }, nil +} diff --git a/vendor/github.com/containerd/containerd/sandbox/proxy/store.go b/vendor/github.com/containerd/containerd/sandbox/proxy/store.go new file mode 100644 index 0000000000000..64e4a2b320392 --- /dev/null +++ b/vendor/github.com/containerd/containerd/sandbox/proxy/store.go @@ -0,0 +1,95 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package proxy + +import ( + "context" + + api "github.com/containerd/containerd/api/services/sandbox/v1" + "github.com/containerd/containerd/errdefs" + sb "github.com/containerd/containerd/sandbox" +) + +// remoteSandboxStore is a low-level containerd client to manage sandbox environments metadata +type remoteSandboxStore struct { + client api.StoreClient +} + +var _ sb.Store = (*remoteSandboxStore)(nil) + +// NewSandboxStore create a client for a sandbox store +func NewSandboxStore(client api.StoreClient) sb.Store { + return &remoteSandboxStore{client: client} +} + +func (s *remoteSandboxStore) Create(ctx context.Context, sandbox sb.Sandbox) (sb.Sandbox, error) { + resp, err := s.client.Create(ctx, &api.StoreCreateRequest{ + Sandbox: sb.ToProto(&sandbox), + }) + if err != nil { + return sb.Sandbox{}, errdefs.FromGRPC(err) + } + + return sb.FromProto(resp.Sandbox), nil +} + +func (s *remoteSandboxStore) Update(ctx context.Context, sandbox sb.Sandbox, fieldpaths ...string) (sb.Sandbox, error) { + resp, err := s.client.Update(ctx, &api.StoreUpdateRequest{ + Sandbox: sb.ToProto(&sandbox), + Fields: fieldpaths, + }) + if err != nil { + return sb.Sandbox{}, errdefs.FromGRPC(err) + } + + return sb.FromProto(resp.Sandbox), nil +} + +func (s *remoteSandboxStore) Get(ctx context.Context, id string) (sb.Sandbox, error) { + resp, err := s.client.Get(ctx, &api.StoreGetRequest{ + SandboxID: id, + }) + if err != nil { + return sb.Sandbox{}, errdefs.FromGRPC(err) + } + + return sb.FromProto(resp.Sandbox), nil +} + +func (s *remoteSandboxStore) List(ctx context.Context, filters ...string) ([]sb.Sandbox, error) { + resp, err := s.client.List(ctx, &api.StoreListRequest{ + Filters: filters, + }) + if err != nil { + return nil, errdefs.FromGRPC(err) + } + + out := make([]sb.Sandbox, len(resp.List)) + for i := range resp.List { + out[i] = sb.FromProto(resp.List[i]) + } + + return out, nil +} + +func (s *remoteSandboxStore) Delete(ctx context.Context, id string) error { + _, err := s.client.Delete(ctx, &api.StoreDeleteRequest{ + SandboxID: id, + }) + + return errdefs.FromGRPC(err) +} diff --git a/vendor/github.com/containerd/containerd/sandbox/store.go b/vendor/github.com/containerd/containerd/sandbox/store.go new file mode 100644 index 0000000000000..cda646dde2796 --- /dev/null +++ b/vendor/github.com/containerd/containerd/sandbox/store.go @@ -0,0 +1,116 @@ +/* + 
Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sandbox + +import ( + "context" + "fmt" + "time" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/typeurl/v2" +) + +// Sandbox is an object stored in metadata database +type Sandbox struct { + // ID uniquely identifies the sandbox in a namespace + ID string + // Labels provide metadata extension for a sandbox + Labels map[string]string + // Runtime shim to use for this sandbox + Runtime RuntimeOpts + // Spec carries the runtime specification used to implement the sandbox + Spec typeurl.Any + // CreatedAt is the time at which the sandbox was created + CreatedAt time.Time + // UpdatedAt is the time at which the sandbox was updated + UpdatedAt time.Time + // Extensions stores client-specified metadata + Extensions map[string]typeurl.Any +} + +// RuntimeOpts holds runtime specific information +type RuntimeOpts struct { + Name string + Options typeurl.Any +} + +// Store is a storage interface for sandbox metadata objects +type Store interface { + // Create a sandbox record in the store + Create(ctx context.Context, sandbox Sandbox) (Sandbox, error) + + // Update the sandbox with the provided sandbox object and fields + Update(ctx context.Context, sandbox Sandbox, fieldpaths ...string) (Sandbox, error) + + // Get sandbox metadata using the id + Get(ctx context.Context, id string) (Sandbox, error) + + // List returns sandboxes that match one or more of the provided filters + List(ctx context.Context, filters ...string) ([]Sandbox, error) + + // Delete a sandbox from metadata store using the id + Delete(ctx context.Context, id string) error +} + +// AddExtension is a helper function to add sandbox metadata extension. +func (s *Sandbox) AddExtension(name string, obj interface{}) error { + if s.Extensions == nil { + s.Extensions = map[string]typeurl.Any{} + } + + out, err := typeurl.MarshalAny(obj) + if err != nil { + return fmt.Errorf("failed to marshal sandbox extension %q: %w", name, err) + } + + s.Extensions[name] = out + return nil +} + +// AddLabel adds a label to sandbox's labels. +func (s *Sandbox) AddLabel(name string, value string) { + if s.Labels == nil { + s.Labels = map[string]string{} + } + + s.Labels[name] = value +} + +// GetExtension retrieves a sandbox extension by name. +func (s *Sandbox) GetExtension(name string, obj interface{}) error { + out, ok := s.Extensions[name] + if !ok { + return errdefs.ErrNotFound + } + + if err := typeurl.UnmarshalTo(out, obj); err != nil { + return fmt.Errorf("failed to unmarshal sandbox extension %q: %w", name, err) + } + + return nil +} + +// GetLabel retrieves a sandbox label by name. 
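Illustrative only: a short sketch of the Sandbox metadata helpers and Store interface above. The `store` value is an assumed sandbox.Store implementation (such as the proxy store added later in this diff); the sandbox ID, runtime name, and label key are made up for the example.

package example

import (
	"context"

	"github.com/containerd/containerd/sandbox"
)

// saveSandbox creates a sandbox record with a label and reads the label back.
func saveSandbox(ctx context.Context, store sandbox.Store) (string, error) {
	sb := sandbox.Sandbox{
		ID:      "example-sandbox",
		Runtime: sandbox.RuntimeOpts{Name: "io.containerd.runc.v2"},
	}

	// AddLabel lazily allocates the Labels map, so the zero value is usable.
	sb.AddLabel("example.org/owner", "demo")

	// Create persists the record; GetLabel reads the label back from the returned copy.
	created, err := store.Create(ctx, sb)
	if err != nil {
		return "", err
	}
	return created.GetLabel("example.org/owner")
}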
+func (s *Sandbox) GetLabel(name string) (string, error) { + out, ok := s.Labels[name] + if !ok { + return "", fmt.Errorf("unable to find label %q in sandbox metadata: %w", name, errdefs.ErrNotFound) + } + + return out, nil +} diff --git a/vendor/github.com/containerd/containerd/services.go b/vendor/github.com/containerd/containerd/services.go index e780e6ccfe4bc..edb8872e551c1 100644 --- a/vendor/github.com/containerd/containerd/services.go +++ b/vendor/github.com/containerd/containerd/services.go @@ -17,6 +17,8 @@ package containerd import ( + "fmt" + containersapi "github.com/containerd/containerd/api/services/containers/v1" "github.com/containerd/containerd/api/services/diff/v1" imagesapi "github.com/containerd/containerd/api/services/images/v1" @@ -28,6 +30,9 @@ import ( "github.com/containerd/containerd/images" "github.com/containerd/containerd/leases" "github.com/containerd/containerd/namespaces" + "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/sandbox" + srv "github.com/containerd/containerd/services" "github.com/containerd/containerd/services/introspection" "github.com/containerd/containerd/snapshots" ) @@ -43,6 +48,8 @@ type services struct { eventService EventService leasesService leases.Manager introspectionService introspection.Service + sandboxStore sandbox.Store + sandboxController sandbox.Controller } // ServicesOpt allows callers to set options on the services @@ -155,3 +162,95 @@ func WithIntrospectionService(in introspection.Service) ServicesOpt { s.introspectionService = in } } + +// WithSandboxStore sets the sandbox store. +func WithSandboxStore(client sandbox.Store) ServicesOpt { + return func(s *services) { + s.sandboxStore = client + } +} + +// WithSandboxController sets the sandbox controller. +func WithSandboxController(client sandbox.Controller) ServicesOpt { + return func(s *services) { + s.sandboxController = client + } +} + +// WithInMemoryServices is suitable for cases when there is need to use containerd's client from +// another (in-memory) containerd plugin (such as CRI). 
+func WithInMemoryServices(ic *plugin.InitContext) ClientOpt { + return func(c *clientOpts) error { + var opts []ServicesOpt + for t, fn := range map[plugin.Type]func(interface{}) ServicesOpt{ + plugin.EventPlugin: func(i interface{}) ServicesOpt { + return WithEventService(i.(EventService)) + }, + plugin.LeasePlugin: func(i interface{}) ServicesOpt { + return WithLeasesService(i.(leases.Manager)) + }, + plugin.SandboxStorePlugin: func(i interface{}) ServicesOpt { + return WithSandboxStore(i.(sandbox.Store)) + }, + plugin.SandboxControllerPlugin: func(i interface{}) ServicesOpt { + return WithSandboxController(i.(sandbox.Controller)) + }, + } { + i, err := ic.Get(t) + if err != nil { + return fmt.Errorf("failed to get %q plugin: %w", t, err) + } + opts = append(opts, fn(i)) + } + + plugins, err := ic.GetByType(plugin.ServicePlugin) + if err != nil { + return fmt.Errorf("failed to get service plugin: %w", err) + } + for s, fn := range map[string]func(interface{}) ServicesOpt{ + srv.ContentService: func(s interface{}) ServicesOpt { + return WithContentStore(s.(content.Store)) + }, + srv.ImagesService: func(s interface{}) ServicesOpt { + return WithImageClient(s.(imagesapi.ImagesClient)) + }, + srv.SnapshotsService: func(s interface{}) ServicesOpt { + return WithSnapshotters(s.(map[string]snapshots.Snapshotter)) + }, + srv.ContainersService: func(s interface{}) ServicesOpt { + return WithContainerClient(s.(containersapi.ContainersClient)) + }, + srv.TasksService: func(s interface{}) ServicesOpt { + return WithTaskClient(s.(tasks.TasksClient)) + }, + srv.DiffService: func(s interface{}) ServicesOpt { + return WithDiffClient(s.(diff.DiffClient)) + }, + srv.NamespacesService: func(s interface{}) ServicesOpt { + return WithNamespaceClient(s.(namespacesapi.NamespacesClient)) + }, + srv.IntrospectionService: func(s interface{}) ServicesOpt { + return WithIntrospectionClient(s.(introspectionapi.IntrospectionClient)) + }, + } { + p := plugins[s] + if p == nil { + return fmt.Errorf("service %q not found", s) + } + i, err := p.Instance() + if err != nil { + return fmt.Errorf("failed to get instance of service %q: %w", s, err) + } + if i == nil { + return fmt.Errorf("instance of service %q not found", s) + } + opts = append(opts, fn(i)) + } + + c.services = &services{} + for _, o := range opts { + o(c.services) + } + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/services/content/contentserver/contentserver.go b/vendor/github.com/containerd/containerd/services/content/contentserver/contentserver.go index eb5855a476c49..76a9e6eea27b2 100644 --- a/vendor/github.com/containerd/containerd/services/content/contentserver/contentserver.go +++ b/vendor/github.com/containerd/containerd/services/content/contentserver/contentserver.go @@ -26,10 +26,10 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" - ptypes "github.com/gogo/protobuf/types" + "github.com/containerd/containerd/protobuf" + ptypes "github.com/containerd/containerd/protobuf/types" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -37,6 +37,7 @@ import ( type service struct { store content.Store + api.UnimplementedContentServer } var bufPool = sync.Pool{ @@ -57,11 +58,12 @@ func (s *service) Register(server *grpc.Server) error { } func (s *service) Info(ctx 
context.Context, req *api.InfoRequest) (*api.InfoResponse, error) { - if err := req.Digest.Validate(); err != nil { + dg, err := digest.Parse(req.Digest) + if err != nil { return nil, status.Errorf(codes.InvalidArgument, "%q failed validation", req.Digest) } - bi, err := s.store.Info(ctx, req.Digest) + bi, err := s.store.Info(ctx, dg) if err != nil { return nil, errdefs.ToGRPC(err) } @@ -72,7 +74,8 @@ func (s *service) Info(ctx context.Context, req *api.InfoRequest) (*api.InfoResp } func (s *service) Update(ctx context.Context, req *api.UpdateRequest) (*api.UpdateResponse, error) { - if err := req.Info.Digest.Validate(); err != nil { + _, err := digest.Parse(req.Info.Digest) + if err != nil { return nil, status.Errorf(codes.InvalidArgument, "%q failed validation", req.Info.Digest) } @@ -88,8 +91,8 @@ func (s *service) Update(ctx context.Context, req *api.UpdateRequest) (*api.Upda func (s *service) List(req *api.ListContentRequest, session api.Content_ListServer) error { var ( - buffer []api.Info - sendBlock = func(block []api.Info) error { + buffer []*api.Info + sendBlock = func(block []*api.Info) error { // send last block return session.Send(&api.ListContentResponse{ Info: block, @@ -98,10 +101,10 @@ func (s *service) List(req *api.ListContentRequest, session api.Content_ListServ ) if err := s.store.Walk(session.Context(), func(info content.Info) error { - buffer = append(buffer, api.Info{ - Digest: info.Digest, - Size_: info.Size, - CreatedAt: info.CreatedAt, + buffer = append(buffer, &api.Info{ + Digest: info.Digest.String(), + Size: info.Size, + CreatedAt: protobuf.ToTimestamp(info.CreatedAt), Labels: info.Labels, }) @@ -130,11 +133,12 @@ func (s *service) List(req *api.ListContentRequest, session api.Content_ListServ func (s *service) Delete(ctx context.Context, req *api.DeleteContentRequest) (*ptypes.Empty, error) { log.G(ctx).WithField("digest", req.Digest).Debugf("delete content") - if err := req.Digest.Validate(); err != nil { + dg, err := digest.Parse(req.Digest) + if err != nil { return nil, status.Errorf(codes.InvalidArgument, err.Error()) } - if err := s.store.Delete(ctx, req.Digest); err != nil { + if err := s.store.Delete(ctx, dg); err != nil { return nil, errdefs.ToGRPC(err) } @@ -142,16 +146,17 @@ func (s *service) Delete(ctx context.Context, req *api.DeleteContentRequest) (*p } func (s *service) Read(req *api.ReadContentRequest, session api.Content_ReadServer) error { - if err := req.Digest.Validate(); err != nil { + dg, err := digest.Parse(req.Digest) + if err != nil { return status.Errorf(codes.InvalidArgument, "%v: %v", req.Digest, err) } - oi, err := s.store.Info(session.Context(), req.Digest) + oi, err := s.store.Info(session.Context(), dg) if err != nil { return errdefs.ToGRPC(err) } - ra, err := s.store.ReaderAt(session.Context(), ocispec.Descriptor{Digest: req.Digest}) + ra, err := s.store.ReaderAt(session.Context(), ocispec.Descriptor{Digest: dg}) if err != nil { return errdefs.ToGRPC(err) } @@ -161,7 +166,7 @@ func (s *service) Read(req *api.ReadContentRequest, session api.Content_ReadServ offset = req.Offset // size is read size, not the expected size of the blob (oi.Size), which the caller might not be aware of. // offset+size can be larger than oi.Size. - size = req.Size_ + size = req.Size // TODO(stevvooe): Using the global buffer pool. At 32KB, it is probably // little inefficient for work over a fast network. We can tune this later. 
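Illustrative only: the regenerated API carries digests as plain strings instead of digest.Digest values, so every handler above now parses and validates the wire string before calling into the typed content.Store API. A minimal sketch of that pattern follows; the request type is a stand-in, not part of the patch.

package example

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/opencontainers/go-digest"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// infoRequest is a stand-in for a generated request type with a string Digest field.
type infoRequest struct{ Digest string }

// lookupInfo validates the wire-format digest string and resolves the blob info.
func lookupInfo(ctx context.Context, store content.Store, req infoRequest) (content.Info, error) {
	dg, err := digest.Parse(req.Digest)
	if err != nil {
		return content.Info{}, status.Errorf(codes.InvalidArgument, "%q failed validation", req.Digest)
	}
	return store.Info(ctx, dg)
}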
@@ -216,12 +221,12 @@ func (s *service) Status(ctx context.Context, req *api.StatusRequest) (*api.Stat var resp api.StatusResponse resp.Status = &api.Status{ - StartedAt: status.StartedAt, - UpdatedAt: status.UpdatedAt, + StartedAt: protobuf.ToTimestamp(status.StartedAt), + UpdatedAt: protobuf.ToTimestamp(status.UpdatedAt), Ref: status.Ref, Offset: status.Offset, Total: status.Total, - Expected: status.Expected, + Expected: status.Expected.String(), } return &resp, nil @@ -235,13 +240,13 @@ func (s *service) ListStatuses(ctx context.Context, req *api.ListStatusesRequest var resp api.ListStatusesResponse for _, status := range statuses { - resp.Statuses = append(resp.Statuses, api.Status{ - StartedAt: status.StartedAt, - UpdatedAt: status.UpdatedAt, + resp.Statuses = append(resp.Statuses, &api.Status{ + StartedAt: protobuf.ToTimestamp(status.StartedAt), + UpdatedAt: protobuf.ToTimestamp(status.UpdatedAt), Ref: status.Ref, Offset: status.Offset, Total: status.Total, - Expected: status.Expected, + Expected: status.Expected.String(), }) } @@ -289,11 +294,11 @@ func (s *service) Write(session api.Content_WriteServer) (err error) { return status.Errorf(codes.InvalidArgument, "first message must have a reference") } - fields := logrus.Fields{ + fields := log.Fields{ "ref": ref, } total = req.Total - expected = req.Expected + expected = digest.Digest(req.Expected) if total > 0 { fields["total"] = total } @@ -341,12 +346,13 @@ func (s *service) Write(session api.Content_WriteServer) (err error) { // Supporting these two paths is quite awkward but it lets both API // users use the same writer style for each with a minimum of overhead. if req.Expected != "" { - if expected != "" && expected != req.Expected { - log.G(ctx).Debugf("commit digest differs from writer digest: %v != %v", req.Expected, expected) + dg := digest.Digest(req.Expected) + if expected != "" && expected != dg { + log.G(ctx).Debugf("commit digest differs from writer digest: %v != %v", dg, expected) } - expected = req.Expected + expected = dg - if _, err := s.store.Info(session.Context(), req.Expected); err == nil { + if _, err := s.store.Info(session.Context(), dg); err == nil { if err := wr.Close(); err != nil { log.G(ctx).WithError(err).Error("failed to close writer") } @@ -368,12 +374,12 @@ func (s *service) Write(session api.Content_WriteServer) (err error) { } switch req.Action { - case api.WriteActionStat: - msg.Digest = wr.Digest() - msg.StartedAt = ws.StartedAt - msg.UpdatedAt = ws.UpdatedAt + case api.WriteAction_STAT: + msg.Digest = wr.Digest().String() + msg.StartedAt = protobuf.ToTimestamp(ws.StartedAt) + msg.UpdatedAt = protobuf.ToTimestamp(ws.UpdatedAt) msg.Total = total - case api.WriteActionWrite, api.WriteActionCommit: + case api.WriteAction_WRITE, api.WriteAction_COMMIT: if req.Offset > 0 { // validate the offset if provided if req.Offset != ws.Offset { @@ -406,7 +412,7 @@ func (s *service) Write(session api.Content_WriteServer) (err error) { msg.Offset += int64(n) } - if req.Action == api.WriteActionCommit { + if req.Action == api.WriteAction_COMMIT { var opts []content.Opt if req.Labels != nil { opts = append(opts, content.WithLabels(req.Labels)) @@ -416,14 +422,14 @@ func (s *service) Write(session api.Content_WriteServer) (err error) { } } - msg.Digest = wr.Digest() + msg.Digest = wr.Digest().String() } if err := session.Send(&msg); err != nil { return err } - if req.Action == api.WriteActionCommit { + if req.Action == api.WriteAction_COMMIT { return nil } @@ -446,22 +452,22 @@ func (s *service) Abort(ctx 
context.Context, req *api.AbortRequest) (*ptypes.Emp return &ptypes.Empty{}, nil } -func infoToGRPC(info content.Info) api.Info { - return api.Info{ - Digest: info.Digest, - Size_: info.Size, - CreatedAt: info.CreatedAt, - UpdatedAt: info.UpdatedAt, +func infoToGRPC(info content.Info) *api.Info { + return &api.Info{ + Digest: info.Digest.String(), + Size: info.Size, + CreatedAt: protobuf.ToTimestamp(info.CreatedAt), + UpdatedAt: protobuf.ToTimestamp(info.UpdatedAt), Labels: info.Labels, } } -func infoFromGRPC(info api.Info) content.Info { +func infoFromGRPC(info *api.Info) content.Info { return content.Info{ - Digest: info.Digest, - Size: info.Size_, - CreatedAt: info.CreatedAt, - UpdatedAt: info.UpdatedAt, + Digest: digest.Digest(info.Digest), + Size: info.Size, + CreatedAt: protobuf.FromTimestamp(info.CreatedAt), + UpdatedAt: protobuf.FromTimestamp(info.UpdatedAt), Labels: info.Labels, } } diff --git a/vendor/github.com/containerd/containerd/services/introspection/introspection.go b/vendor/github.com/containerd/containerd/services/introspection/introspection.go index 71758fad88b86..7f88af4f9feb3 100644 --- a/vendor/github.com/containerd/containerd/services/introspection/introspection.go +++ b/vendor/github.com/containerd/containerd/services/introspection/introspection.go @@ -22,10 +22,10 @@ import ( api "github.com/containerd/containerd/api/services/introspection/v1" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" - ptypes "github.com/gogo/protobuf/types" + ptypes "github.com/containerd/containerd/protobuf/types" ) -// Service defines the instrospection service interface +// Service defines the introspection service interface type Service interface { Plugins(context.Context, []string) (*api.PluginsResponse, error) Server(context.Context, *ptypes.Empty) (*api.ServerResponse, error) diff --git a/vendor/github.com/containerd/containerd/services/introspection/local.go b/vendor/github.com/containerd/containerd/services/introspection/local.go index 47388e4371c03..7aa291b47bd7a 100644 --- a/vendor/github.com/containerd/containerd/services/introspection/local.go +++ b/vendor/github.com/containerd/containerd/services/introspection/local.go @@ -20,6 +20,7 @@ import ( context "context" "os" "path/filepath" + "runtime" "sync" api "github.com/containerd/containerd/api/services/introspection/v1" @@ -27,10 +28,11 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/filters" "github.com/containerd/containerd/plugin" + ptypes "github.com/containerd/containerd/protobuf/types" "github.com/containerd/containerd/services" - "github.com/gogo/googleapis/google/rpc" - ptypes "github.com/gogo/protobuf/types" "github.com/google/uuid" + "google.golang.org/genproto/googleapis/rpc/code" + rpc "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" "google.golang.org/grpc/status" ) @@ -55,7 +57,7 @@ type Local struct { mu sync.Mutex root string plugins *plugin.Set - pluginCache []api.Plugin + pluginCache []*api.Plugin } var _ = (api.IntrospectionClient)(&Local{}) @@ -74,14 +76,13 @@ func (l *Local) Plugins(ctx context.Context, req *api.PluginsRequest, _ ...grpc. 
return nil, errdefs.ToGRPCf(errdefs.ErrInvalidArgument, err.Error()) } - var plugins []api.Plugin + var plugins []*api.Plugin allPlugins := l.getPlugins() for _, p := range allPlugins { - if !filter.Match(adaptPlugin(p)) { - continue + p := p + if filter.Match(adaptPlugin(p)) { + plugins = append(plugins, p) } - - plugins = append(plugins, p) } return &api.PluginsResponse{ @@ -89,7 +90,7 @@ func (l *Local) Plugins(ctx context.Context, req *api.PluginsRequest, _ ...grpc. }, nil } -func (l *Local) getPlugins() []api.Plugin { +func (l *Local) getPlugins() []*api.Plugin { l.mu.Lock() defer l.mu.Unlock() plugins := l.plugins.GetAll() @@ -105,8 +106,18 @@ func (l *Local) Server(ctx context.Context, _ *ptypes.Empty, _ ...grpc.CallOptio if err != nil { return nil, errdefs.ToGRPC(err) } + pid := os.Getpid() + var pidns uint64 + if runtime.GOOS == "linux" { + pidns, err = statPIDNS(pid) + if err != nil { + return nil, errdefs.ToGRPC(err) + } + } return &api.ServerResponse{ - UUID: u, + UUID: u, + Pid: uint64(pid), + Pidns: pidns, }, nil } @@ -149,7 +160,7 @@ func (l *Local) uuidPath() string { } func adaptPlugin(o interface{}) filters.Adaptor { - obj := o.(api.Plugin) + obj := o.(*api.Plugin) return filters.AdapterFunc(func(fieldpath []string) (string, bool) { if len(fieldpath) == 0 { return "", false @@ -175,12 +186,12 @@ func adaptPlugin(o interface{}) filters.Adaptor { }) } -func pluginsToPB(plugins []*plugin.Plugin) []api.Plugin { - var pluginsPB []api.Plugin +func pluginsToPB(plugins []*plugin.Plugin) []*api.Plugin { + var pluginsPB []*api.Plugin for _, p := range plugins { - var platforms []types.Platform + var platforms []*types.Platform for _, p := range p.Meta.Platforms { - platforms = append(platforms, types.Platform{ + platforms = append(platforms, &types.Platform{ OS: p.OS, Architecture: p.Architecture, Variant: p.Variant, @@ -210,13 +221,13 @@ func pluginsToPB(plugins []*plugin.Plugin) []api.Plugin { } } else { initErr = &rpc.Status{ - Code: int32(rpc.UNKNOWN), + Code: int32(code.Code_UNKNOWN), Message: err.Error(), } } } - pluginsPB = append(pluginsPB, api.Plugin{ + pluginsPB = append(pluginsPB, &api.Plugin{ Type: p.Registration.Type.String(), ID: p.Registration.ID, Requires: requires, diff --git a/vendor/github.com/containerd/containerd/sys/fds.go b/vendor/github.com/containerd/containerd/services/introspection/pidns_linux.go similarity index 65% rename from vendor/github.com/containerd/containerd/sys/fds.go rename to vendor/github.com/containerd/containerd/services/introspection/pidns_linux.go index a71a9cd7e93de..4dce1a5e7bd34 100644 --- a/vendor/github.com/containerd/containerd/sys/fds.go +++ b/vendor/github.com/containerd/containerd/services/introspection/pidns_linux.go @@ -1,6 +1,3 @@ -//go:build !windows && !darwin -// +build !windows,!darwin - /* Copyright The containerd Authors. @@ -17,19 +14,23 @@ limitations under the License. 
*/ -package sys +package introspection import ( + "fmt" "os" - "path/filepath" - "strconv" + "syscall" ) -// GetOpenFds returns the number of open fds for the process provided by pid -func GetOpenFds(pid int) (int, error) { - dirs, err := os.ReadDir(filepath.Join("/proc", strconv.Itoa(pid), "fd")) +func statPIDNS(pid int) (uint64, error) { + f := fmt.Sprintf("/proc/%d/ns/pid", pid) + st, err := os.Stat(f) if err != nil { - return -1, err + return 0, err + } + stSys, ok := st.Sys().(*syscall.Stat_t) + if !ok { + return 0, fmt.Errorf("%T is not *syscall.Stat_t", st.Sys()) } - return len(dirs), nil + return stSys.Ino, nil } diff --git a/vendor/github.com/containerd/containerd/services/introspection/pidns_others.go b/vendor/github.com/containerd/containerd/services/introspection/pidns_others.go new file mode 100644 index 0000000000000..c2cc475af3f3f --- /dev/null +++ b/vendor/github.com/containerd/containerd/services/introspection/pidns_others.go @@ -0,0 +1,23 @@ +//go:build !linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package introspection + +func statPIDNS(pid int) (uint64, error) { + return 0, nil +} diff --git a/vendor/github.com/containerd/containerd/services/introspection/service.go b/vendor/github.com/containerd/containerd/services/introspection/service.go index c11b8dc1ce3e0..60013b52c82bf 100644 --- a/vendor/github.com/containerd/containerd/services/introspection/service.go +++ b/vendor/github.com/containerd/containerd/services/introspection/service.go @@ -22,8 +22,8 @@ import ( api "github.com/containerd/containerd/api/services/introspection/v1" "github.com/containerd/containerd/plugin" + ptypes "github.com/containerd/containerd/protobuf/types" "github.com/containerd/containerd/services" - ptypes "github.com/gogo/protobuf/types" "google.golang.org/grpc" ) @@ -49,7 +49,7 @@ func init() { localClient, ok := i.(*Local) if !ok { - return nil, errors.New("Could not create a local client for introspection service") + return nil, errors.New("could not create a local client for introspection service") } localClient.UpdateLocal(ic.Root) @@ -62,6 +62,7 @@ func init() { type server struct { local api.IntrospectionClient + api.UnimplementedIntrospectionServer } var _ = (api.IntrospectionServer)(&server{}) diff --git a/vendor/github.com/containerd/containerd/services/server/config/config.go b/vendor/github.com/containerd/containerd/services/server/config/config.go index 4c475d43d7abc..340c93b31fdf0 100644 --- a/vendor/github.com/containerd/containerd/services/server/config/config.go +++ b/vendor/github.com/containerd/containerd/services/server/config/config.go @@ -104,17 +104,17 @@ func (c *Config) ValidateV2() error { return nil } for _, p := range c.DisabledPlugins { - if len(strings.Split(p, ".")) < 4 { + if !strings.HasPrefix(p, "io.containerd.") || len(strings.SplitN(p, ".", 4)) < 4 { return fmt.Errorf("invalid disabled plugin URI %q expect io.containerd.x.vx", p) } } for _, p := range c.RequiredPlugins { - if len(strings.Split(p, ".")) < 4 { + if 
!strings.HasPrefix(p, "io.containerd.") || len(strings.SplitN(p, ".", 4)) < 4 { return fmt.Errorf("invalid required plugin URI %q expect io.containerd.x.vx", p) } } for p := range c.Plugins { - if len(strings.Split(p, ".")) < 4 { + if !strings.HasPrefix(p, "io.containerd.") || len(strings.SplitN(p, ".", 4)) < 4 { return fmt.Errorf("invalid plugin key URI %q expect io.containerd.x.vx", p) } } @@ -147,7 +147,7 @@ type Debug struct { UID int `toml:"uid"` GID int `toml:"gid"` Level string `toml:"level"` - // Format represents the logging format + // Format represents the logging format. Supported values are 'text' and 'json'. Format string `toml:"format"` } @@ -168,44 +168,6 @@ type ProxyPlugin struct { Address string `toml:"address"` } -// BoltConfig defines the configuration values for the bolt plugin, which is -// loaded here, rather than back registered in the metadata package. -type BoltConfig struct { - // ContentSharingPolicy sets the sharing policy for content between - // namespaces. - // - // The default mode "shared" will make blobs available in all - // namespaces once it is pulled into any namespace. The blob will be pulled - // into the namespace if a writer is opened with the "Expected" digest that - // is already present in the backend. - // - // The alternative mode, "isolated" requires that clients prove they have - // access to the content by providing all of the content to the ingest - // before the blob is added to the namespace. - // - // Both modes share backing data, while "shared" will reduce total - // bandwidth across namespaces, at the cost of allowing access to any blob - // just by knowing its digest. - ContentSharingPolicy string `toml:"content_sharing_policy"` -} - -const ( - // SharingPolicyShared represents the "shared" sharing policy - SharingPolicyShared = "shared" - // SharingPolicyIsolated represents the "isolated" sharing policy - SharingPolicyIsolated = "isolated" -) - -// Validate validates if BoltConfig is valid -func (bc *BoltConfig) Validate() error { - switch bc.ContentSharingPolicy { - case SharingPolicyShared, SharingPolicyIsolated: - return nil - default: - return fmt.Errorf("unknown policy: %s: %w", bc.ContentSharingPolicy, errdefs.ErrInvalidArgument) - } -} - // Decode unmarshals a plugin specific configuration by plugin id func (c *Config) Decode(p *plugin.Registration) (interface{}, error) { id := p.URI() diff --git a/vendor/github.com/containerd/containerd/services/services.go b/vendor/github.com/containerd/containerd/services/services.go index 27f47a5cef4e2..d7255169f10a6 100644 --- a/vendor/github.com/containerd/containerd/services/services.go +++ b/vendor/github.com/containerd/containerd/services/services.go @@ -29,10 +29,10 @@ const ( TasksService = "tasks-service" // NamespacesService is id of namespaces service. NamespacesService = "namespaces-service" - // LeasesService is id of leases service. - LeasesService = "leases-service" // DiffService is id of diff service. 
DiffService = "diff-service" // IntrospectionService is the id of introspection service IntrospectionService = "introspection-service" + // Streaming service is the id of the streaming service + StreamingService = "streaming-service" ) diff --git a/vendor/github.com/containerd/containerd/snapshots/overlay/overlayutils/check.go b/vendor/github.com/containerd/containerd/snapshots/overlay/overlayutils/check.go index 17e7547feb972..d2ac91aa8f935 100644 --- a/vendor/github.com/containerd/containerd/snapshots/overlay/overlayutils/check.go +++ b/vendor/github.com/containerd/containerd/snapshots/overlay/overlayutils/check.go @@ -1,5 +1,4 @@ //go:build linux -// +build linux /* Copyright The containerd Authors. @@ -176,7 +175,7 @@ func NeedsUserXAttr(d string) (bool, error) { } opts := []string{ - fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", filepath.Join(td, "lower2"), filepath.Join(td, "lower1"), filepath.Join(td, "upper"), filepath.Join(td, "work")), + fmt.Sprintf("ro,lowerdir=%s:%s,upperdir=%s,workdir=%s", filepath.Join(td, "lower2"), filepath.Join(td, "lower1"), filepath.Join(td, "upper"), filepath.Join(td, "work")), "userxattr", } diff --git a/vendor/github.com/containerd/containerd/snapshots/proxy/proxy.go b/vendor/github.com/containerd/containerd/snapshots/proxy/proxy.go index 00c320c60872b..3ef3b2698e8e0 100644 --- a/vendor/github.com/containerd/containerd/snapshots/proxy/proxy.go +++ b/vendor/github.com/containerd/containerd/snapshots/proxy/proxy.go @@ -24,8 +24,9 @@ import ( "github.com/containerd/containerd/api/types" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/protobuf" + protobuftypes "github.com/containerd/containerd/protobuf/types" "github.com/containerd/containerd/snapshots" - protobuftypes "github.com/gogo/protobuf/types" ) // NewSnapshotter returns a new Snapshotter which communicates over a GRPC @@ -192,22 +193,22 @@ func (p *proxySnapshotter) Cleanup(ctx context.Context) error { } func toKind(kind snapshotsapi.Kind) snapshots.Kind { - if kind == snapshotsapi.KindActive { + if kind == snapshotsapi.Kind_ACTIVE { return snapshots.KindActive } - if kind == snapshotsapi.KindView { + if kind == snapshotsapi.Kind_VIEW { return snapshots.KindView } return snapshots.KindCommitted } -func toInfo(info snapshotsapi.Info) snapshots.Info { +func toInfo(info *snapshotsapi.Info) snapshots.Info { return snapshots.Info{ Name: info.Name, Parent: info.Parent, Kind: toKind(info.Kind), - Created: info.CreatedAt, - Updated: info.UpdatedAt, + Created: protobuf.FromTimestamp(info.CreatedAt), + Updated: protobuf.FromTimestamp(info.UpdatedAt), Labels: info.Labels, } } @@ -215,7 +216,7 @@ func toInfo(info snapshotsapi.Info) snapshots.Info { func toUsage(resp *snapshotsapi.UsageResponse) snapshots.Usage { return snapshots.Usage{ Inodes: resp.Inodes, - Size: resp.Size_, + Size: resp.Size, } } @@ -225,6 +226,7 @@ func toMounts(mm []*types.Mount) []mount.Mount { mounts[i] = mount.Mount{ Type: m.Type, Source: m.Source, + Target: m.Target, Options: m.Options, } } @@ -233,21 +235,21 @@ func toMounts(mm []*types.Mount) []mount.Mount { func fromKind(kind snapshots.Kind) snapshotsapi.Kind { if kind == snapshots.KindActive { - return snapshotsapi.KindActive + return snapshotsapi.Kind_ACTIVE } if kind == snapshots.KindView { - return snapshotsapi.KindView + return snapshotsapi.Kind_VIEW } - return snapshotsapi.KindCommitted + return snapshotsapi.Kind_COMMITTED } -func fromInfo(info snapshots.Info) snapshotsapi.Info { - return 
snapshotsapi.Info{ +func fromInfo(info snapshots.Info) *snapshotsapi.Info { + return &snapshotsapi.Info{ Name: info.Name, Parent: info.Parent, Kind: fromKind(info.Kind), - CreatedAt: info.Created, - UpdatedAt: info.Updated, + CreatedAt: protobuf.ToTimestamp(info.Created), + UpdatedAt: protobuf.ToTimestamp(info.Updated), Labels: info.Labels, } } diff --git a/vendor/github.com/containerd/containerd/snapshots/snapshotter.go b/vendor/github.com/containerd/containerd/snapshots/snapshotter.go index e144fb1583f29..5fa5aa530cc57 100644 --- a/vendor/github.com/containerd/containerd/snapshots/snapshotter.go +++ b/vendor/github.com/containerd/containerd/snapshots/snapshotter.go @@ -33,6 +33,11 @@ const ( UnpackKeyFormat = UnpackKeyPrefix + "-%s %s" inheritedLabelsPrefix = "containerd.io/snapshot/" labelSnapshotRef = "containerd.io/snapshot.ref" + + // LabelSnapshotUIDMapping is the label used for UID mappings + LabelSnapshotUIDMapping = "containerd.io/snapshot/uidmapping" + // LabelSnapshotGIDMapping is the label used for GID mappings + LabelSnapshotGIDMapping = "containerd.io/snapshot/gidmapping" ) // Kind identifies the kind of snapshot. @@ -94,7 +99,7 @@ func (k *Kind) UnmarshalJSON(b []byte) error { } // Info provides information about a particular snapshot. -// JSON marshallability is supported for interactive with tools like ctr, +// JSON marshalling is supported for interacting with tools like ctr, type Info struct { Kind Kind // active or committed snapshot Name string // name or key of snapshot @@ -102,8 +107,8 @@ type Info struct { // Labels for a snapshot. // - // Note: only labels prefixed with `containerd.io/snapshot/` will be inherited by the - // snapshotter's `Prepare`, `View`, or `Commit` calls. + // Note: only labels prefixed with `containerd.io/snapshot/` will be inherited + // by the snapshotter's `Prepare`, `View`, or `Commit` calls. Labels map[string]string `json:",omitempty"` Created time.Time `json:",omitempty"` // Created time Updated time.Time `json:",omitempty"` // Last update time @@ -122,7 +127,7 @@ type Usage struct { func (u *Usage) Add(other Usage) { u.Size += other.Size - // TODO(stevvooe): assumes independent inodes, but provides and upper + // TODO(stevvooe): assumes independent inodes, but provides an upper // bound. This should be pretty close, assuming the inodes for a // snapshot are roughly unique to it. Don't trust this assumption. u.Inodes += other.Inodes @@ -165,19 +170,18 @@ type WalkFunc func(context.Context, Info) error // same key space. For example, an active snapshot may not share the same key // with a committed snapshot. // -// We cover several examples below to demonstrate the utility of a snapshot -// snapshotter. +// We cover several examples below to demonstrate the utility of the snapshotter. // // # Importing a Layer // -// To import a layer, we simply have the Snapshotter provide a list of +// To import a layer, we simply have the snapshotter provide a list of // mounts to be applied such that our dst will capture a changeset. We start // out by getting a path to the layer tar file and creating a temp location to // unpack it to: // // layerPath, tmpDir := getLayerPath(), mkTmpDir() // just a path to layer tar file. // -// We start by using a Snapshotter to Prepare a new snapshot transaction, using a +// We start by using the snapshotter to Prepare a new snapshot transaction, using a // key and descending from the empty parent "". 
To prevent our layer from being // garbage collected during unpacking, we add the `containerd.io/gc.root` label: // @@ -187,7 +191,7 @@ type WalkFunc func(context.Context, Info) error // mounts, err := snapshotter.Prepare(ctx, key, "", noGcOpt) // if err != nil { ... } // -// We get back a list of mounts from Snapshotter.Prepare, with the key identifying +// We get back a list of mounts from snapshotter.Prepare(), with the key identifying // the active snapshot. Mount this to the temporary location with the // following: // @@ -205,7 +209,7 @@ type WalkFunc func(context.Context, Info) error // digest, err := unpackLayer(tmpLocation, layer) // unpack into layer location // if err != nil { ... } // -// When the above completes, we should have a filesystem the represents the +// When the above completes, we should have a filesystem that represents the // contents of the layer. Careful implementations should verify that digest // matches the expected DiffID. When completed, we unmount the mounts: // @@ -218,14 +222,14 @@ type WalkFunc func(context.Context, Info) error // // if err := snapshotter.Commit(ctx, digest.String(), key, noGcOpt); err != nil { ... } // -// Now, we have a layer in the Snapshotter that can be accessed with the digest +// Now, we have a layer in the snapshotter that can be accessed with the digest // provided during commit. // // # Importing the Next Layer // // Making a layer depend on the above is identical to the process described // above except that the parent is provided as parent when calling -// Manager.Prepare, assuming a clean, unique key identifier: +// snapshotter.Prepare(), assuming a clean, unique key identifier: // // mounts, err := snapshotter.Prepare(ctx, key, parentDigest, noGcOpt) // @@ -234,20 +238,20 @@ type WalkFunc func(context.Context, Info) error // // # Running a Container // -// To run a container, we simply provide Snapshotter.Prepare the committed image +// To run a container, we simply provide snapshotter.Prepare() the committed image // snapshot as the parent. After mounting, the prepared path can // be used directly as the container's filesystem: // // mounts, err := snapshotter.Prepare(ctx, containerKey, imageRootFSChainID) // // The returned mounts can then be passed directly to the container runtime. If -// one would like to create a new image from the filesystem, Manager.Commit is +// one would like to create a new image from the filesystem, snapshotter.Commit() is // called: // // if err := snapshotter.Commit(ctx, newImageSnapshot, containerKey); err != nil { ... } // -// Alternatively, for most container runs, Snapshotter.Remove will be called to -// signal the Snapshotter to abandon the changes. +// Alternatively, for most container runs, snapshotter.Remove() will be called to +// signal the snapshotter to abandon the changes. type Snapshotter interface { // Stat returns the info for an active or committed snapshot by name or // key. @@ -267,12 +271,12 @@ type Snapshotter interface { // The running time of this call for active snapshots is dependent on // implementation, but may be proportional to the size of the resource. // Callers should take this into consideration. Implementations should - // attempt to honer context cancellation and avoid taking locks when making + // attempt to honor context cancellation and avoid taking locks when making // the calculation. Usage(ctx context.Context, key string) (Usage, error) // Mounts returns the mounts for the active snapshot transaction identified - // by key. 
Can be called on an read-write or readonly transaction. This is + // by key. Can be called on a read-write or readonly transaction. This is // available only for active snapshots. // // This can be used to recover mounts after calling View or Prepare. @@ -298,7 +302,7 @@ type Snapshotter interface { // committed back to the snapshot snapshotter. View returns a readonly view on // the parent, with the active snapshot being tracked by the given key. // - // This method operates identically to Prepare, except that Mounts returned + // This method operates identically to Prepare, except the mounts returned // may have the readonly flag set. Any modifications to the underlying // filesystem will be ignored. Implementations may perform this in a more // efficient manner that differs from what would be attempted with diff --git a/vendor/github.com/containerd/containerd/snapshotter_default_unix.go b/vendor/github.com/containerd/containerd/snapshotter_default_unix.go index dcba4792cce4b..8e191ca6ac6cb 100644 --- a/vendor/github.com/containerd/containerd/snapshotter_default_unix.go +++ b/vendor/github.com/containerd/containerd/snapshotter_default_unix.go @@ -1,5 +1,4 @@ //go:build darwin || freebsd || solaris -// +build darwin freebsd solaris /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/snapshotter_opts_unix.go b/vendor/github.com/containerd/containerd/snapshotter_opts_unix.go index 2a2c829f01b2b..4739e192fd28f 100644 --- a/vendor/github.com/containerd/containerd/snapshotter_opts_unix.go +++ b/vendor/github.com/containerd/containerd/snapshotter_opts_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows /* Copyright The containerd Authors. @@ -20,17 +19,92 @@ package containerd import ( + "context" "fmt" "github.com/containerd/containerd/snapshots" ) +const ( + capabRemapIDs = "remap-ids" +) + // WithRemapperLabels creates the labels used by any supporting snapshotter // to shift the filesystem ownership (user namespace mapping) automatically; currently // supported by the fuse-overlayfs snapshotter func WithRemapperLabels(ctrUID, hostUID, ctrGID, hostGID, length uint32) snapshots.Opt { return snapshots.WithLabels(map[string]string{ - "containerd.io/snapshot/uidmapping": fmt.Sprintf("%d:%d:%d", ctrUID, hostUID, length), - "containerd.io/snapshot/gidmapping": fmt.Sprintf("%d:%d:%d", ctrGID, hostGID, length), - }) + snapshots.LabelSnapshotUIDMapping: fmt.Sprintf("%d:%d:%d", ctrUID, hostUID, length), + snapshots.LabelSnapshotGIDMapping: fmt.Sprintf("%d:%d:%d", ctrGID, hostGID, length)}) +} + +func resolveSnapshotOptions(ctx context.Context, client *Client, snapshotterName string, snapshotter snapshots.Snapshotter, parent string, opts ...snapshots.Opt) (string, error) { + capabs, err := client.GetSnapshotterCapabilities(ctx, snapshotterName) + if err != nil { + return "", err + } + + for _, capab := range capabs { + if capab == capabRemapIDs { + // Snapshotter supports ID remapping, we don't need to do anything. 
+ return parent, nil + } + } + + var local snapshots.Info + for _, opt := range opts { + opt(&local) + } + + needsRemap := false + var uidMap, gidMap string + + if value, ok := local.Labels[snapshots.LabelSnapshotUIDMapping]; ok { + needsRemap = true + uidMap = value + } + if value, ok := local.Labels[snapshots.LabelSnapshotGIDMapping]; ok { + needsRemap = true + gidMap = value + } + + if !needsRemap { + return parent, nil + } + + var ctrUID, hostUID, length uint32 + _, err = fmt.Sscanf(uidMap, "%d:%d:%d", &ctrUID, &hostUID, &length) + if err != nil { + return "", fmt.Errorf("uidMap unparsable: %w", err) + } + + var ctrGID, hostGID, lengthGID uint32 + _, err = fmt.Sscanf(gidMap, "%d:%d:%d", &ctrGID, &hostGID, &lengthGID) + if err != nil { + return "", fmt.Errorf("gidMap unparsable: %w", err) + } + + if ctrUID != 0 || ctrGID != 0 { + return "", fmt.Errorf("Container UID/GID of 0 only supported currently (%d/%d)", ctrUID, ctrGID) + } + + // TODO(dgl): length isn't taken into account for the intermediate snapshot id. + usernsID := fmt.Sprintf("%s-%d-%d", parent, hostUID, hostGID) + if _, err := snapshotter.Stat(ctx, usernsID); err == nil { + return usernsID, nil + } + mounts, err := snapshotter.Prepare(ctx, usernsID+"-remap", parent) + if err != nil { + return "", err + } + // TODO(dgl): length isn't taken into account here yet either. + if err := remapRootFS(ctx, mounts, hostUID, hostGID); err != nil { + snapshotter.Remove(ctx, usernsID+"-remap") + return "", err + } + if err := snapshotter.Commit(ctx, usernsID, usernsID+"-remap"); err != nil { + return "", err + } + + return usernsID, nil } diff --git a/vendor/github.com/containerd/containerd/snapshotter_opts_windows.go b/vendor/github.com/containerd/containerd/snapshotter_opts_windows.go new file mode 100644 index 0000000000000..540bcb3130ab2 --- /dev/null +++ b/vendor/github.com/containerd/containerd/snapshotter_opts_windows.go @@ -0,0 +1,27 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package containerd + +import ( + "context" + + "github.com/containerd/containerd/snapshots" +) + +func resolveSnapshotOptions(ctx context.Context, client *Client, snapshotterName string, snapshotter snapshots.Snapshotter, parent string, opts ...snapshots.Opt) (string, error) { + return parent, nil +} diff --git a/vendor/github.com/containerd/containerd/sys/filesys_deprecated_windows.go b/vendor/github.com/containerd/containerd/sys/filesys_deprecated_windows.go new file mode 100644 index 0000000000000..a59edabeb6510 --- /dev/null +++ b/vendor/github.com/containerd/containerd/sys/filesys_deprecated_windows.go @@ -0,0 +1,51 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
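Illustrative only: a sketch of how a client might opt in to the ID-remapping labels consumed by WithRemapperLabels and resolveSnapshotOptions above. The `client` and `image` values are assumed to exist (a connected *containerd.Client and a pulled containerd.Image), and the ID range is an example value, not something defined by this patch.

package example

import (
	"context"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/oci"
)

// newRemappedContainer creates a container whose rootfs snapshot carries
// uidmapping/gidmapping labels. Host IDs 100000..165535 back container IDs
// 0..65535. A snapshotter advertising the "remap-ids" capability handles the
// labels natively; otherwise resolveSnapshotOptions prepares and chowns an
// intermediate snapshot on the client side. A real container would also need
// matching user-namespace mappings in its OCI spec.
func newRemappedContainer(ctx context.Context, client *containerd.Client, image containerd.Image) (containerd.Container, error) {
	return client.NewContainer(ctx, "remapped-demo",
		containerd.WithNewSnapshot("remapped-demo-rootfs", image,
			containerd.WithRemapperLabels(0, 100000, 0, 100000, 65536)),
		containerd.WithNewSpec(oci.WithImageConfig(image)),
	)
}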
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package sys + +import ( + "os" + + "github.com/moby/sys/sequential" +) + +// CreateSequential is deprecated. +// +// Deprecated: use github.com/moby/sys/sequential.Create +func CreateSequential(name string) (*os.File, error) { + return sequential.Create(name) +} + +// OpenSequential is deprecated. +// +// Deprecated: use github.com/moby/sys/sequential.Open +func OpenSequential(name string) (*os.File, error) { + return sequential.Open(name) +} + +// OpenFileSequential is deprecated. +// +// Deprecated: use github.com/moby/sys/sequential.OpenFile +func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { + return sequential.OpenFile(name, flag, perm) +} + +// TempFileSequential is deprecated. +// +// Deprecated: use github.com/moby/sys/sequential.CreateTemp +func TempFileSequential(dir, prefix string) (f *os.File, err error) { + return sequential.CreateTemp(dir, prefix) +} diff --git a/vendor/github.com/containerd/containerd/sys/filesys_unix.go b/vendor/github.com/containerd/containerd/sys/filesys_unix.go index 805a7a736fab2..333e85ceb4a85 100644 --- a/vendor/github.com/containerd/containerd/sys/filesys_unix.go +++ b/vendor/github.com/containerd/containerd/sys/filesys_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows /* Copyright The containerd Authors. @@ -21,11 +20,6 @@ package sys import "os" -// ForceRemoveAll on unix is just a wrapper for os.RemoveAll -func ForceRemoveAll(path string) error { - return os.RemoveAll(path) -} - // MkdirAllWithACL is a wrapper for os.MkdirAll on Unix systems. func MkdirAllWithACL(path string, perm os.FileMode) error { return os.MkdirAll(path, perm) diff --git a/vendor/github.com/containerd/containerd/sys/filesys_windows.go b/vendor/github.com/containerd/containerd/sys/filesys_windows.go index 87ebacc200680..67fc4048c1d54 100644 --- a/vendor/github.com/containerd/containerd/sys/filesys_windows.go +++ b/vendor/github.com/containerd/containerd/sys/filesys_windows.go @@ -17,42 +17,44 @@ package sys import ( - "fmt" "os" - "path/filepath" "regexp" - "sort" - "strconv" - "strings" "syscall" "unsafe" - "github.com/Microsoft/hcsshim" "golang.org/x/sys/windows" ) -const ( - // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System - SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" -) +// SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System. +const SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" + +// volumePath is a regular expression to check if a path is a Windows +// volume path (e.g., "\\?\Volume{4c1b02c1-d990-11dc-99ae-806e6f6e6963}" +// or "\\?\Volume{4c1b02c1-d990-11dc-99ae-806e6f6e6963}\"). +var volumePath = regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}\\?$`) -// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory -// ACL'd for Builtin Administrators and Local System. 
-func MkdirAllWithACL(path string, perm os.FileMode) error { - return mkdirall(path, true) +// MkdirAllWithACL is a custom version of os.MkdirAll modified for use on Windows +// so that it is both volume path aware, and to create a directory +// an appropriate SDDL defined ACL for Builtin Administrators and Local System. +func MkdirAllWithACL(path string, _ os.FileMode) error { + sa, err := makeSecurityAttributes(SddlAdministratorsLocalSystem) + if err != nil { + return &os.PathError{Op: "mkdirall", Path: path, Err: err} + } + return mkdirall(path, sa) } -// MkdirAll implementation that is volume path aware for Windows. It can be used -// as a drop-in replacement for os.MkdirAll() +// MkdirAll is a custom version of os.MkdirAll that is volume path aware for +// Windows. It can be used as a drop-in replacement for os.MkdirAll. func MkdirAll(path string, _ os.FileMode) error { - return mkdirall(path, false) + return mkdirall(path, nil) } // mkdirall is a custom version of os.MkdirAll modified for use on Windows // so that it is both volume path aware, and can create a directory with // a DACL. -func mkdirall(path string, adminAndLocalSystem bool) error { - if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { +func mkdirall(path string, perm *windows.SecurityAttributes) error { + if volumePath.MatchString(path) { return nil } @@ -65,11 +67,7 @@ func mkdirall(path string, adminAndLocalSystem bool) error { if dir.IsDir() { return nil } - return &os.PathError{ - Op: "mkdir", - Path: path, - Err: syscall.ENOTDIR, - } + return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} } // Slow path: make sure parent exists and then call Mkdir for path. @@ -84,20 +82,15 @@ func mkdirall(path string, adminAndLocalSystem bool) error { } if j > 1 { - // Create parent - err = mkdirall(path[0:j-1], adminAndLocalSystem) + // Create parent. + err = mkdirall(fixRootDirectory(path[:j-1]), perm) if err != nil { return err } } - // Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result. - if adminAndLocalSystem { - err = mkdirWithACL(path) - } else { - err = os.Mkdir(path, 0) - } - + // Parent now exists; invoke Mkdir and use its result. + err = mkdirWithACL(path, perm) if err != nil { // Handle arguments like "foo/." by // double-checking that directory doesn't exist. @@ -117,229 +110,42 @@ func mkdirall(path string, adminAndLocalSystem bool) error { // in golang to cater for creating a directory am ACL permitting full // access, with inheritance, to any subfolder/file for Built-in Administrators // and Local System. -func mkdirWithACL(name string) error { - sa := windows.SecurityAttributes{Length: 0} - sd, err := windows.SecurityDescriptorFromString(SddlAdministratorsLocalSystem) - if err != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: err} +func mkdirWithACL(name string, sa *windows.SecurityAttributes) error { + if sa == nil { + return os.Mkdir(name, 0) } - sa.Length = uint32(unsafe.Sizeof(sa)) - sa.InheritHandle = 1 - sa.SecurityDescriptor = sd namep, err := windows.UTF16PtrFromString(name) if err != nil { return &os.PathError{Op: "mkdir", Path: name, Err: err} } - e := windows.CreateDirectory(namep, &sa) - if e != nil { - return &os.PathError{Op: "mkdir", Path: name, Err: e} + err = windows.CreateDirectory(namep, sa) + if err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} } return nil } -// IsAbs is a platform-specific wrapper for filepath.IsAbs. 
On Windows, -// golang filepath.IsAbs does not consider a path \windows\system32 as absolute -// as it doesn't start with a drive-letter/colon combination. However, in -// docker we need to verify things such as WORKDIR /windows/system32 in -// a Dockerfile (which gets translated to \windows\system32 when being processed -// by the daemon. This SHOULD be treated as absolute from a docker processing -// perspective. -func IsAbs(path string) bool { - if !filepath.IsAbs(path) { - if !strings.HasPrefix(path, string(os.PathSeparator)) { - return false +// fixRootDirectory fixes a reference to a drive's root directory to +// have the required trailing slash. +func fixRootDirectory(p string) string { + if len(p) == len(`\\?\c:`) { + if os.IsPathSeparator(p[0]) && os.IsPathSeparator(p[1]) && p[2] == '?' && os.IsPathSeparator(p[3]) && p[5] == ':' { + return p + `\` } } - return true -} - -// The origin of the functions below here are the golang OS and windows packages, -// slightly modified to only cope with files, not directories due to the -// specific use case. -// -// The alteration is to allow a file on Windows to be opened with -// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating -// the standby list, particularly when accessing large files such as layer.tar. - -// CreateSequential creates the named file with mode 0666 (before umask), truncating -// it if it already exists. If successful, methods on the returned -// File can be used for I/O; the associated file descriptor has mode -// O_RDWR. -// If there is an error, it will be of type *PathError. -func CreateSequential(name string) (*os.File, error) { - return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0) -} - -// OpenSequential opens the named file for reading. If successful, methods on -// the returned file can be used for reading; the associated file -// descriptor has mode O_RDONLY. -// If there is an error, it will be of type *PathError. -func OpenSequential(name string) (*os.File, error) { - return OpenFileSequential(name, os.O_RDONLY, 0) -} - -// OpenFileSequential is the generalized open call; most users will use Open -// or Create instead. -// If there is an error, it will be of type *PathError. 
-func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) { - if name == "" { - return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} - } - r, errf := windowsOpenFileSequential(name, flag, 0) - if errf == nil { - return r, nil - } - return nil, &os.PathError{Op: "open", Path: name, Err: errf} -} - -func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { - r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0) - if e != nil { - return nil, e - } - return os.NewFile(uintptr(r), name), nil + return p } -func makeInheritSa() *windows.SecurityAttributes { +func makeSecurityAttributes(sddl string) (*windows.SecurityAttributes, error) { var sa windows.SecurityAttributes sa.Length = uint32(unsafe.Sizeof(sa)) sa.InheritHandle = 1 - return &sa -} - -func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { - if len(path) == 0 { - return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND - } - pathp, err := windows.UTF16PtrFromString(path) + var err error + sa.SecurityDescriptor, err = windows.SecurityDescriptorFromString(sddl) if err != nil { - return windows.InvalidHandle, err - } - var access uint32 - switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) { - case windows.O_RDONLY: - access = windows.GENERIC_READ - case windows.O_WRONLY: - access = windows.GENERIC_WRITE - case windows.O_RDWR: - access = windows.GENERIC_READ | windows.GENERIC_WRITE - } - if mode&windows.O_CREAT != 0 { - access |= windows.GENERIC_WRITE - } - if mode&windows.O_APPEND != 0 { - access &^= windows.GENERIC_WRITE - access |= windows.FILE_APPEND_DATA - } - sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE) - var sa *windows.SecurityAttributes - if mode&windows.O_CLOEXEC == 0 { - sa = makeInheritSa() - } - var createmode uint32 - switch { - case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL): - createmode = windows.CREATE_NEW - case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC): - createmode = windows.CREATE_ALWAYS - case mode&windows.O_CREAT == windows.O_CREAT: - createmode = windows.OPEN_ALWAYS - case mode&windows.O_TRUNC == windows.O_TRUNC: - createmode = windows.TRUNCATE_EXISTING - default: - createmode = windows.OPEN_EXISTING + return nil, err } - // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. - // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx - const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN - h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) - return h, e -} - -// ForceRemoveAll is the same as os.RemoveAll, but is aware of io.containerd.snapshotter.v1.windows -// and uses hcsshim to unmount and delete container layers contained therein, in the correct order, -// when passed a containerd root data directory (i.e. the `--root` directory for containerd). -func ForceRemoveAll(path string) error { - // snapshots/windows/windows.go init() - const snapshotPlugin = "io.containerd.snapshotter.v1" + "." 
+ "windows" - // snapshots/windows/windows.go NewSnapshotter() - snapshotDir := filepath.Join(path, snapshotPlugin, "snapshots") - if stat, err := os.Stat(snapshotDir); err == nil && stat.IsDir() { - if err := cleanupWCOWLayers(snapshotDir); err != nil { - return fmt.Errorf("failed to cleanup WCOW layers in %s: %w", snapshotDir, err) - } - } - - return os.RemoveAll(path) -} - -func cleanupWCOWLayers(root string) error { - // See snapshots/windows/windows.go getSnapshotDir() - var layerNums []int - var rmLayerNums []int - if err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { - if path != root && info.IsDir() { - name := filepath.Base(path) - if strings.HasPrefix(name, "rm-") { - layerNum, err := strconv.Atoi(strings.TrimPrefix(name, "rm-")) - if err != nil { - return err - } - rmLayerNums = append(rmLayerNums, layerNum) - } else { - layerNum, err := strconv.Atoi(name) - if err != nil { - return err - } - layerNums = append(layerNums, layerNum) - } - return filepath.SkipDir - } - - return nil - }); err != nil { - return err - } - - sort.Sort(sort.Reverse(sort.IntSlice(rmLayerNums))) - for _, rmLayerNum := range rmLayerNums { - if err := cleanupWCOWLayer(filepath.Join(root, "rm-"+strconv.Itoa(rmLayerNum))); err != nil { - return err - } - } - - sort.Sort(sort.Reverse(sort.IntSlice(layerNums))) - for _, layerNum := range layerNums { - if err := cleanupWCOWLayer(filepath.Join(root, strconv.Itoa(layerNum))); err != nil { - return err - } - } - - return nil -} - -func cleanupWCOWLayer(layerPath string) error { - info := hcsshim.DriverInfo{ - HomeDir: filepath.Dir(layerPath), - } - - // ERROR_DEV_NOT_EXIST is returned if the layer is not currently prepared or activated. - // ERROR_FLT_INSTANCE_NOT_FOUND is returned if the layer is currently activated but not prepared. - if err := hcsshim.UnprepareLayer(info, filepath.Base(layerPath)); err != nil { - if hcserror, ok := err.(*hcsshim.HcsError); !ok || (hcserror.Err != windows.ERROR_DEV_NOT_EXIST && hcserror.Err != syscall.Errno(windows.ERROR_FLT_INSTANCE_NOT_FOUND)) { - return fmt.Errorf("failed to unprepare %s: %w", layerPath, err) - } - } - - if err := hcsshim.DeactivateLayer(info, filepath.Base(layerPath)); err != nil { - return fmt.Errorf("failed to deactivate %s: %w", layerPath, err) - } - - if err := hcsshim.DestroyLayer(info, filepath.Base(layerPath)); err != nil { - return fmt.Errorf("failed to destroy %s: %w", layerPath, err) - } - - return nil + return &sa, nil } diff --git a/vendor/github.com/containerd/containerd/sys/oom_unsupported.go b/vendor/github.com/containerd/containerd/sys/oom_unsupported.go index fa0db5a10e0e2..f57977466377f 100644 --- a/vendor/github.com/containerd/containerd/sys/oom_unsupported.go +++ b/vendor/github.com/containerd/containerd/sys/oom_unsupported.go @@ -1,5 +1,4 @@ //go:build !linux -// +build !linux /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/sys/reaper/reaper_unix.go b/vendor/github.com/containerd/containerd/sys/reaper/reaper_unix.go index 6c4f13b9088e9..8172f224e8b3b 100644 --- a/vendor/github.com/containerd/containerd/sys/reaper/reaper_unix.go +++ b/vendor/github.com/containerd/containerd/sys/reaper/reaper_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows /* Copyright The containerd Authors. 
@@ -91,7 +90,7 @@ type Monitor struct { subscribers map[chan runc.Exit]*subscriber } -// Start starts the command a registers the process with the reaper +// Start starts the command and registers the process with the reaper func (m *Monitor) Start(c *exec.Cmd) (chan runc.Exit, error) { ec := m.Subscribe() if err := c.Start(); err != nil { diff --git a/vendor/github.com/containerd/containerd/sys/socket_unix.go b/vendor/github.com/containerd/containerd/sys/socket_unix.go index 367e19cad8454..5ecbeddc91429 100644 --- a/vendor/github.com/containerd/containerd/sys/socket_unix.go +++ b/vendor/github.com/containerd/containerd/sys/socket_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/task.go b/vendor/github.com/containerd/containerd/task.go index 105d4fbc31436..be35611e7d439 100644 --- a/vendor/github.com/containerd/containerd/task.go +++ b/vendor/github.com/containerd/containerd/task.go @@ -38,11 +38,12 @@ import ( "github.com/containerd/containerd/mount" "github.com/containerd/containerd/oci" "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/protobuf" + google_protobuf "github.com/containerd/containerd/protobuf/types" "github.com/containerd/containerd/rootfs" "github.com/containerd/containerd/runtime/linux/runctypes" "github.com/containerd/containerd/runtime/v2/runc/options" - "github.com/containerd/typeurl" - google_protobuf "github.com/gogo/protobuf/types" + "github.com/containerd/typeurl/v2" digest "github.com/opencontainers/go-digest" is "github.com/opencontainers/image-spec/specs-go" v1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -264,7 +265,7 @@ func (t *task) Status(ctx context.Context) (Status, error) { return Status{ Status: ProcessStatus(strings.ToLower(r.Process.Status.String())), ExitStatus: r.Process.ExitStatus, - ExitTime: r.Process.ExitedAt, + ExitTime: protobuf.FromTimestamp(r.Process.ExitedAt), }, nil } @@ -284,7 +285,7 @@ func (t *task) Wait(ctx context.Context) (<-chan ExitStatus, error) { } c <- ExitStatus{ code: r.ExitStatus, - exitedAt: r.ExitedAt, + exitedAt: protobuf.FromTimestamp(r.ExitedAt), } }() return c, nil @@ -334,7 +335,7 @@ func (t *task) Delete(ctx context.Context, opts ...ProcessDeleteOpts) (*ExitStat if t.io != nil { t.io.Close() } - return &ExitStatus{code: r.ExitStatus, exitedAt: r.ExitedAt}, nil + return &ExitStatus{code: r.ExitStatus, exitedAt: protobuf.FromTimestamp(r.ExitedAt)}, nil } func (t *task) Exec(ctx context.Context, id string, spec *specs.Process, ioCreate cio.Creator) (_ Process, err error) { @@ -351,7 +352,7 @@ func (t *task) Exec(ctx context.Context, id string, spec *specs.Process, ioCreat i.Close() } }() - any, err := typeurl.MarshalAny(spec) + any, err := protobuf.MarshalAnyToProto(spec) if err != nil { return nil, err } @@ -449,9 +450,9 @@ func (t *task) Checkpoint(ctx context.Context, opts ...CheckpointTaskOpts) (Imag if i.Name == "" { i.Name = fmt.Sprintf(checkpointNameFormat, t.id, time.Now().Format(checkpointDateFormat)) } - request.ParentCheckpoint = i.ParentCheckpoint + request.ParentCheckpoint = i.ParentCheckpoint.String() if i.Options != nil { - any, err := typeurl.MarshalAny(i.Options) + any, err := protobuf.MarshalAnyToProto(i.Options) if err != nil { return nil, err } @@ -540,7 +541,7 @@ func (t *task) Update(ctx context.Context, opts ...UpdateTaskOpts) error { if err != nil { return err } - request.Resources = any + request.Resources = protobuf.FromAny(any) } if i.Annotations != nil { 
request.Annotations = i.Annotations @@ -608,8 +609,8 @@ func (t *task) checkpointTask(ctx context.Context, index *v1.Index, request *tas for _, d := range response.Descriptors { index.Manifests = append(index.Manifests, v1.Descriptor{ MediaType: d.MediaType, - Size: d.Size_, - Digest: d.Digest, + Size: d.Size, + Digest: digest.Digest(d.Digest), Platform: &v1.Platform{ OS: goruntime.GOOS, Architecture: goruntime.GOARCH, diff --git a/vendor/github.com/containerd/containerd/task_opts.go b/vendor/github.com/containerd/containerd/task_opts.go index 56f3cbad60dcc..e372ca4451e04 100644 --- a/vendor/github.com/containerd/containerd/task_opts.go +++ b/vendor/github.com/containerd/containerd/task_opts.go @@ -69,8 +69,8 @@ func WithTaskCheckpoint(im Image) NewTaskOpts { if m.MediaType == images.MediaTypeContainerd1Checkpoint { info.Checkpoint = &types.Descriptor{ MediaType: m.MediaType, - Size_: m.Size, - Digest: m.Digest, + Size: m.Size, + Digest: m.Digest.String(), Annotations: m.Annotations, } return nil diff --git a/vendor/github.com/containerd/containerd/task_opts_unix.go b/vendor/github.com/containerd/containerd/task_opts_unix.go index 1d5983b6294f7..5e5e66a6f854d 100644 --- a/vendor/github.com/containerd/containerd/task_opts_unix.go +++ b/vendor/github.com/containerd/containerd/task_opts_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows /* Copyright The containerd Authors. diff --git a/vendor/github.com/containerd/containerd/tracing/helpers.go b/vendor/github.com/containerd/containerd/tracing/helpers.go new file mode 100644 index 0000000000000..981da6c79532a --- /dev/null +++ b/vendor/github.com/containerd/containerd/tracing/helpers.go @@ -0,0 +1,94 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package tracing + +import ( + "encoding/json" + "fmt" + "strings" + + "go.opentelemetry.io/otel/attribute" +) + +const ( + spanDelimiter = "." 
+) + +func makeSpanName(names ...string) string { + return strings.Join(names, spanDelimiter) +} + +func any(k string, v interface{}) attribute.KeyValue { + if v == nil { + return attribute.String(k, "") + } + + switch typed := v.(type) { + case bool: + return attribute.Bool(k, typed) + case []bool: + return attribute.BoolSlice(k, typed) + case int: + return attribute.Int(k, typed) + case []int: + return attribute.IntSlice(k, typed) + case int8: + return attribute.Int(k, int(typed)) + case []int8: + ls := make([]int, 0, len(typed)) + for _, i := range typed { + ls = append(ls, int(i)) + } + return attribute.IntSlice(k, ls) + case int16: + return attribute.Int(k, int(typed)) + case []int16: + ls := make([]int, 0, len(typed)) + for _, i := range typed { + ls = append(ls, int(i)) + } + return attribute.IntSlice(k, ls) + case int32: + return attribute.Int64(k, int64(typed)) + case []int32: + ls := make([]int64, 0, len(typed)) + for _, i := range typed { + ls = append(ls, int64(i)) + } + return attribute.Int64Slice(k, ls) + case int64: + return attribute.Int64(k, typed) + case []int64: + return attribute.Int64Slice(k, typed) + case float64: + return attribute.Float64(k, typed) + case []float64: + return attribute.Float64Slice(k, typed) + case string: + return attribute.String(k, typed) + case []string: + return attribute.StringSlice(k, typed) + } + + if stringer, ok := v.(fmt.Stringer); ok { + return attribute.String(k, stringer.String()) + } + if b, err := json.Marshal(v); b != nil && err == nil { + return attribute.String(k, string(b)) + } + return attribute.String(k, fmt.Sprintf("%v", v)) +} diff --git a/vendor/github.com/containerd/containerd/tracing/log.go b/vendor/github.com/containerd/containerd/tracing/log.go new file mode 100644 index 0000000000000..98fa16f931e28 --- /dev/null +++ b/vendor/github.com/containerd/containerd/tracing/log.go @@ -0,0 +1,66 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package tracing + +import ( + "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// NewLogrusHook creates a new logrus hook +func NewLogrusHook() *LogrusHook { + return &LogrusHook{} +} + +// LogrusHook is a logrus hook which adds logrus events to active spans. +// If the span is not recording or the span context is invalid, the hook is a no-op. +type LogrusHook struct{} + +// Levels returns the logrus levels that this hook is interested in. +func (h *LogrusHook) Levels() []logrus.Level { + return logrus.AllLevels +} + +// Fire is called when a log event occurs. 
+func (h *LogrusHook) Fire(entry *logrus.Entry) error { + span := trace.SpanFromContext(entry.Context) + if span == nil { + return nil + } + + if !span.SpanContext().IsValid() || !span.IsRecording() { + return nil + } + + span.AddEvent( + entry.Message, + trace.WithAttributes(logrusDataToAttrs(entry.Data)...), + trace.WithAttributes(attribute.String("level", entry.Level.String())), + trace.WithTimestamp(entry.Time), + ) + + return nil +} + +func logrusDataToAttrs(data logrus.Fields) []attribute.KeyValue { + attrs := make([]attribute.KeyValue, 0, len(data)) + for k, v := range data { + attrs = append(attrs, any(k, v)) + } + return attrs +} diff --git a/vendor/github.com/containerd/containerd/tracing/tracing.go b/vendor/github.com/containerd/containerd/tracing/tracing.go new file mode 100644 index 0000000000000..7fe7bfd5bf228 --- /dev/null +++ b/vendor/github.com/containerd/containerd/tracing/tracing.go @@ -0,0 +1,117 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package tracing + +import ( + "context" + "net/http" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" + httpconv "go.opentelemetry.io/otel/semconv/v1.17.0/httpconv" + "go.opentelemetry.io/otel/trace" +) + +// StartConfig defines configuration for a new span object. +type StartConfig struct { + spanOpts []trace.SpanStartOption +} + +type SpanOpt func(config *StartConfig) + +// WithHTTPRequest marks span as a HTTP request operation from client to server. +// It'll append attributes from the HTTP request object and mark it with `SpanKindClient` type. +func WithHTTPRequest(request *http.Request) SpanOpt { + return func(config *StartConfig) { + config.spanOpts = append(config.spanOpts, + trace.WithSpanKind(trace.SpanKindClient), // A client making a request to a server + trace.WithAttributes(httpconv.ClientRequest(request)...), // Add HTTP attributes + ) + } +} + +// StartSpan starts child span in a context. +func StartSpan(ctx context.Context, opName string, opts ...SpanOpt) (context.Context, *Span) { + config := StartConfig{} + for _, fn := range opts { + fn(&config) + } + tracer := otel.Tracer("") + if parent := trace.SpanFromContext(ctx); parent != nil && parent.SpanContext().IsValid() { + tracer = parent.TracerProvider().Tracer("") + } + ctx, span := tracer.Start(ctx, opName, config.spanOpts...) + return ctx, &Span{otelSpan: span} +} + +// SpanFromContext returns the current Span from the context. +func SpanFromContext(ctx context.Context) *Span { + return &Span{ + otelSpan: trace.SpanFromContext(ctx), + } +} + +// Span is wrapper around otel trace.Span. +// Span is the individual component of a trace. It represents a +// single named and timed operation of a workflow that is traced. +type Span struct { + otelSpan trace.Span +} + +// End completes the span. +func (s *Span) End() { + s.otelSpan.End() +} + +// AddEvent adds an event with provided name and options. 
+func (s *Span) AddEvent(name string, options ...trace.EventOption) { + s.otelSpan.AddEvent(name, options...) +} + +// SetStatus sets the status of the current span. +// If an error is encountered, it records the error and sets span status to Error. +func (s *Span) SetStatus(err error) { + if err != nil { + s.otelSpan.RecordError(err) + s.otelSpan.SetStatus(codes.Error, err.Error()) + } else { + s.otelSpan.SetStatus(codes.Ok, "") + } +} + +// SetAttributes sets kv as attributes of the span. +func (s *Span) SetAttributes(kv ...attribute.KeyValue) { + s.otelSpan.SetAttributes(kv...) +} + +// Name sets the span name by joining a list of strings in dot separated format. +func Name(names ...string) string { + return makeSpanName(names...) +} + +// Attribute takes a key value pair and returns attribute.KeyValue type. +func Attribute(k string, v interface{}) attribute.KeyValue { + return any(k, v) +} + +// HTTPStatusCodeAttributes generates attributes of the HTTP namespace as specified by the OpenTelemetry +// specification for a span. +func HTTPStatusCodeAttributes(code int) []attribute.KeyValue { + return []attribute.KeyValue{semconv.HTTPStatusCodeKey.Int(code)} +} diff --git a/vendor/github.com/containerd/containerd/transfer.go b/vendor/github.com/containerd/containerd/transfer.go new file mode 100644 index 0000000000000..9979aa75bd9be --- /dev/null +++ b/vendor/github.com/containerd/containerd/transfer.go @@ -0,0 +1,109 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package containerd + +import ( + "context" + "errors" + "io" + + streamingapi "github.com/containerd/containerd/api/services/streaming/v1" + transferapi "github.com/containerd/containerd/api/services/transfer/v1" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/pkg/streaming" + "github.com/containerd/containerd/pkg/transfer" + "github.com/containerd/containerd/pkg/transfer/proxy" + "github.com/containerd/containerd/protobuf" + "github.com/containerd/typeurl/v2" +) + +func (c *Client) Transfer(ctx context.Context, src interface{}, dest interface{}, opts ...transfer.Opt) error { + ctx, done, err := c.WithLease(ctx) + if err != nil { + return err + } + defer done(ctx) + + return proxy.NewTransferrer(transferapi.NewTransferClient(c.conn), c.streamCreator()).Transfer(ctx, src, dest, opts...) 
+} + +func (c *Client) streamCreator() streaming.StreamCreator { + return &streamCreator{ + client: streamingapi.NewStreamingClient(c.conn), + } +} + +type streamCreator struct { + client streamingapi.StreamingClient +} + +func (sc *streamCreator) Create(ctx context.Context, id string) (streaming.Stream, error) { + stream, err := sc.client.Stream(ctx) + if err != nil { + return nil, err + } + + a, err := typeurl.MarshalAny(&streamingapi.StreamInit{ + ID: id, + }) + if err != nil { + return nil, err + } + err = stream.Send(protobuf.FromAny(a)) + if err != nil { + if !errors.Is(err, io.EOF) { + err = errdefs.FromGRPC(err) + } + return nil, err + } + + // Receive an ack that stream is init and ready + if _, err = stream.Recv(); err != nil { + if !errors.Is(err, io.EOF) { + err = errdefs.FromGRPC(err) + } + return nil, err + } + + return &clientStream{ + s: stream, + }, nil +} + +type clientStream struct { + s streamingapi.Streaming_StreamClient +} + +func (cs *clientStream) Send(a typeurl.Any) (err error) { + err = cs.s.Send(protobuf.FromAny(a)) + if !errors.Is(err, io.EOF) { + err = errdefs.FromGRPC(err) + } + return +} + +func (cs *clientStream) Recv() (a typeurl.Any, err error) { + a, err = cs.s.Recv() + if !errors.Is(err, io.EOF) { + err = errdefs.FromGRPC(err) + } + return +} + +func (cs *clientStream) Close() error { + return cs.s.CloseSend() +} diff --git a/vendor/github.com/containerd/containerd/unpacker.go b/vendor/github.com/containerd/containerd/unpacker.go deleted file mode 100644 index 03cf7554e6a6c..0000000000000 --- a/vendor/github.com/containerd/containerd/unpacker.go +++ /dev/null @@ -1,427 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package containerd - -import ( - "context" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "math/rand" - "sync" - "sync/atomic" - "time" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/images" - "github.com/containerd/containerd/log" - "github.com/containerd/containerd/mount" - "github.com/containerd/containerd/pkg/kmutex" - "github.com/containerd/containerd/platforms" - "github.com/containerd/containerd/snapshots" - "github.com/opencontainers/go-digest" - "github.com/opencontainers/image-spec/identity" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" - "golang.org/x/sync/errgroup" - "golang.org/x/sync/semaphore" -) - -const ( - labelSnapshotRef = "containerd.io/snapshot.ref" -) - -type unpacker struct { - updateCh chan ocispec.Descriptor - snapshotter string - config UnpackConfig - c *Client - limiter *semaphore.Weighted -} - -func (c *Client) newUnpacker(ctx context.Context, rCtx *RemoteContext) (*unpacker, error) { - snapshotter, err := c.resolveSnapshotterName(ctx, rCtx.Snapshotter) - if err != nil { - return nil, err - } - var config = UnpackConfig{ - DuplicationSuppressor: kmutex.NewNoop(), - } - for _, o := range rCtx.UnpackOpts { - if err := o(ctx, &config); err != nil { - return nil, err - } - } - var limiter *semaphore.Weighted - if rCtx.MaxConcurrentDownloads > 0 { - limiter = semaphore.NewWeighted(int64(rCtx.MaxConcurrentDownloads)) - } - return &unpacker{ - updateCh: make(chan ocispec.Descriptor, 128), - snapshotter: snapshotter, - config: config, - c: c, - limiter: limiter, - }, nil -} - -func (u *unpacker) unpack( - ctx context.Context, - rCtx *RemoteContext, - h images.Handler, - config ocispec.Descriptor, - layers []ocispec.Descriptor, -) error { - p, err := content.ReadBlob(ctx, u.c.ContentStore(), config) - if err != nil { - return err - } - - var i ocispec.Image - if err := json.Unmarshal(p, &i); err != nil { - return fmt.Errorf("unmarshal image config: %w", err) - } - diffIDs := i.RootFS.DiffIDs - if len(layers) != len(diffIDs) { - return fmt.Errorf("number of layers and diffIDs don't match: %d != %d", len(layers), len(diffIDs)) - } - - if u.config.CheckPlatformSupported { - imgPlatform := platforms.Normalize(ocispec.Platform{OS: i.OS, Architecture: i.Architecture}) - snapshotterPlatformMatcher, err := u.c.GetSnapshotterSupportedPlatforms(ctx, u.snapshotter) - if err != nil { - return fmt.Errorf("failed to find supported platforms for snapshotter %s: %w", u.snapshotter, err) - } - if !snapshotterPlatformMatcher.Match(imgPlatform) { - return fmt.Errorf("snapshotter %s does not support platform %s for image %s", u.snapshotter, imgPlatform, config.Digest) - } - } - - var ( - sn = u.c.SnapshotService(u.snapshotter) - a = u.c.DiffService() - cs = u.c.ContentStore() - - chain []digest.Digest - - fetchOffset int - fetchC []chan struct{} - fetchErr chan error - ) - - // If there is an early return, ensure any ongoing - // fetches get their context cancelled - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - doUnpackFn := func(i int, desc ocispec.Descriptor) error { - parent := identity.ChainID(chain) - chain = append(chain, diffIDs[i]) - chainID := identity.ChainID(chain).String() - - unlock, err := u.lockSnChainID(ctx, chainID) - if err != nil { - return err - } - defer unlock() - - if _, err := sn.Stat(ctx, chainID); err == nil { - // no need to handle - return nil - } else if !errdefs.IsNotFound(err) { - return 
fmt.Errorf("failed to stat snapshot %s: %w", chainID, err) - } - - // inherits annotations which are provided as snapshot labels. - labels := snapshots.FilterInheritedLabels(desc.Annotations) - if labels == nil { - labels = make(map[string]string) - } - labels[labelSnapshotRef] = chainID - - var ( - key string - mounts []mount.Mount - opts = append(rCtx.SnapshotterOpts, snapshots.WithLabels(labels)) - ) - - for try := 1; try <= 3; try++ { - // Prepare snapshot with from parent, label as root - key = fmt.Sprintf(snapshots.UnpackKeyFormat, uniquePart(), chainID) - mounts, err = sn.Prepare(ctx, key, parent.String(), opts...) - if err != nil { - if errdefs.IsAlreadyExists(err) { - if _, err := sn.Stat(ctx, chainID); err != nil { - if !errdefs.IsNotFound(err) { - return fmt.Errorf("failed to stat snapshot %s: %w", chainID, err) - } - // Try again, this should be rare, log it - log.G(ctx).WithField("key", key).WithField("chainid", chainID).Debug("extraction snapshot already exists, chain id not found") - } else { - // no need to handle, snapshot now found with chain id - return nil - } - } else { - return fmt.Errorf("failed to prepare extraction snapshot %q: %w", key, err) - } - } else { - break - } - } - if err != nil { - return fmt.Errorf("unable to prepare extraction snapshot: %w", err) - } - - // Abort the snapshot if commit does not happen - abort := func() { - if err := sn.Remove(ctx, key); err != nil { - log.G(ctx).WithError(err).Errorf("failed to cleanup %q", key) - } - } - - if fetchErr == nil { - fetchErr = make(chan error, 1) - fetchOffset = i - fetchC = make([]chan struct{}, len(layers)-fetchOffset) - for i := range fetchC { - fetchC[i] = make(chan struct{}) - } - - go func(i int) { - err := u.fetch(ctx, h, layers[i:], fetchC) - if err != nil { - fetchErr <- err - } - close(fetchErr) - }(i) - } - - select { - case <-ctx.Done(): - return ctx.Err() - case err := <-fetchErr: - if err != nil { - return err - } - case <-fetchC[i-fetchOffset]: - } - - diff, err := a.Apply(ctx, desc, mounts, u.config.ApplyOpts...) - if err != nil { - abort() - return fmt.Errorf("failed to extract layer %s: %w", diffIDs[i], err) - } - if diff.Digest != diffIDs[i] { - abort() - return fmt.Errorf("wrong diff id calculated on extraction %q", diffIDs[i]) - } - - if err = sn.Commit(ctx, chainID, key, opts...); err != nil { - abort() - if errdefs.IsAlreadyExists(err) { - return nil - } - return fmt.Errorf("failed to commit snapshot %s: %w", key, err) - } - - // Set the uncompressed label after the uncompressed - // digest has been verified through apply. 
- cinfo := content.Info{ - Digest: desc.Digest, - Labels: map[string]string{ - "containerd.io/uncompressed": diff.Digest.String(), - }, - } - if _, err := cs.Update(ctx, cinfo, "labels.containerd.io/uncompressed"); err != nil { - return err - } - return nil - } - - for i, desc := range layers { - if err := doUnpackFn(i, desc); err != nil { - return err - } - } - - chainID := identity.ChainID(chain).String() - cinfo := content.Info{ - Digest: config.Digest, - Labels: map[string]string{ - fmt.Sprintf("containerd.io/gc.ref.snapshot.%s", u.snapshotter): chainID, - }, - } - _, err = cs.Update(ctx, cinfo, fmt.Sprintf("labels.containerd.io/gc.ref.snapshot.%s", u.snapshotter)) - if err != nil { - return err - } - log.G(ctx).WithFields(logrus.Fields{ - "config": config.Digest, - "chainID": chainID, - }).Debug("image unpacked") - - return nil -} - -func (u *unpacker) fetch(ctx context.Context, h images.Handler, layers []ocispec.Descriptor, done []chan struct{}) error { - eg, ctx2 := errgroup.WithContext(ctx) - for i, desc := range layers { - desc := desc - i := i - - if err := u.acquire(ctx); err != nil { - return err - } - - eg.Go(func() error { - unlock, err := u.lockBlobDescriptor(ctx2, desc) - if err != nil { - u.release() - return err - } - - _, err = h.Handle(ctx2, desc) - - unlock() - u.release() - - if err != nil && !errors.Is(err, images.ErrSkipDesc) { - return err - } - close(done[i]) - - return nil - }) - } - - return eg.Wait() -} - -func (u *unpacker) handlerWrapper( - uctx context.Context, - rCtx *RemoteContext, - unpacks *int32, -) (func(images.Handler) images.Handler, *errgroup.Group) { - eg, uctx := errgroup.WithContext(uctx) - return func(f images.Handler) images.Handler { - var ( - lock sync.Mutex - layers = map[digest.Digest][]ocispec.Descriptor{} - ) - return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - unlock, err := u.lockBlobDescriptor(ctx, desc) - if err != nil { - return nil, err - } - - children, err := f.Handle(ctx, desc) - unlock() - if err != nil { - return children, err - } - - switch desc.MediaType { - case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: - var nonLayers []ocispec.Descriptor - var manifestLayers []ocispec.Descriptor - - // Split layers from non-layers, layers will be handled after - // the config - for _, child := range children { - if images.IsLayerType(child.MediaType) { - manifestLayers = append(manifestLayers, child) - } else { - nonLayers = append(nonLayers, child) - } - } - - lock.Lock() - for _, nl := range nonLayers { - layers[nl.Digest] = manifestLayers - } - lock.Unlock() - - children = nonLayers - case images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: - lock.Lock() - l := layers[desc.Digest] - lock.Unlock() - if len(l) > 0 { - atomic.AddInt32(unpacks, 1) - eg.Go(func() error { - return u.unpack(uctx, rCtx, f, desc, l) - }) - } - } - return children, nil - }) - }, eg -} - -func (u *unpacker) acquire(ctx context.Context) error { - if u.limiter == nil { - return nil - } - return u.limiter.Acquire(ctx, 1) -} - -func (u *unpacker) release() { - if u.limiter == nil { - return - } - u.limiter.Release(1) -} - -func (u *unpacker) lockSnChainID(ctx context.Context, chainID string) (func(), error) { - key := u.makeChainIDKeyWithSnapshotter(chainID) - - if err := u.config.DuplicationSuppressor.Lock(ctx, key); err != nil { - return nil, err - } - return func() { - u.config.DuplicationSuppressor.Unlock(key) - }, nil -} - -func (u *unpacker) 
lockBlobDescriptor(ctx context.Context, desc ocispec.Descriptor) (func(), error) { - key := u.makeBlobDescriptorKey(desc) - - if err := u.config.DuplicationSuppressor.Lock(ctx, key); err != nil { - return nil, err - } - return func() { - u.config.DuplicationSuppressor.Unlock(key) - }, nil -} - -func (u *unpacker) makeChainIDKeyWithSnapshotter(chainID string) string { - return fmt.Sprintf("sn://%s/%v", u.snapshotter, chainID) -} - -func (u *unpacker) makeBlobDescriptorKey(desc ocispec.Descriptor) string { - return fmt.Sprintf("blob://%v", desc.Digest) -} - -func uniquePart() string { - t := time.Now() - var b [3]byte - // Ignore read failures, just decreases uniqueness - rand.Read(b[:]) - return fmt.Sprintf("%d-%s", t.Nanosecond(), base64.URLEncoding.EncodeToString(b[:])) -} diff --git a/vendor/github.com/containerd/containerd/version/version.go b/vendor/github.com/containerd/containerd/version/version.go index 6bc3ed178aef3..7e081ae279ccd 100644 --- a/vendor/github.com/containerd/containerd/version/version.go +++ b/vendor/github.com/containerd/containerd/version/version.go @@ -23,7 +23,7 @@ var ( Package = "github.com/containerd/containerd" // Version holds the complete version number. Filled in at linking time. - Version = "1.6.19+unknown" + Version = "1.7.0-rc.2+unknown" // Revision is filled with the VCS (e.g. git) revision being used to build // the program at linking time. diff --git a/vendor/github.com/containerd/go-cni/.golangci.yml b/vendor/github.com/containerd/go-cni/.golangci.yml index 673fd33a2f987..1e066a10c3570 100644 --- a/vendor/github.com/containerd/go-cni/.golangci.yml +++ b/vendor/github.com/containerd/go-cni/.golangci.yml @@ -1,7 +1,5 @@ linters: enable: - - structcheck - - varcheck - staticcheck - unconvert - gofmt diff --git a/vendor/github.com/containerd/go-cni/namespace_opts.go b/vendor/github.com/containerd/go-cni/namespace_opts.go index 3387f6fd13113..ae4e37cc832ab 100644 --- a/vendor/github.com/containerd/go-cni/namespace_opts.go +++ b/vendor/github.com/containerd/go-cni/namespace_opts.go @@ -50,6 +50,14 @@ func WithCapabilityDNS(dns DNS) NamespaceOpts { } } +// WithCapabilityCgroupPath passes in the cgroup path capability. +func WithCapabilityCgroupPath(cgroupPath string) NamespaceOpts { + return func(c *Namespace) error { + c.capabilityArgs["cgroupPath"] = cgroupPath + return nil + } +} + // WithCapability support well-known capabilities // https://www.cni.dev/docs/conventions/#well-known-capabilities func WithCapability(name string, capability interface{}) NamespaceOpts { diff --git a/vendor/github.com/containerd/go-cni/result.go b/vendor/github.com/containerd/go-cni/result.go index 7bc115543bb09..02957ddc4ed85 100644 --- a/vendor/github.com/containerd/go-cni/result.go +++ b/vendor/github.com/containerd/go-cni/result.go @@ -32,13 +32,18 @@ type IPConfig struct { // Result contains the network information returned by CNI.Setup // // a) Interfaces list. Depending on the plugin, this can include the sandbox -// (eg, container or hypervisor) interface name and/or the host interface -// name, the hardware addresses of each interface, and details about the -// sandbox (if any) the interface is in. +// +// (eg, container or hypervisor) interface name and/or the host interface +// name, the hardware addresses of each interface, and details about the +// sandbox (if any) the interface is in. +// // b) IP configuration assigned to each interface. The IPv4 and/or IPv6 addresses, -// gateways, and routes assigned to sandbox and/or host interfaces. 
+// +// gateways, and routes assigned to sandbox and/or host interfaces. +// // c) DNS information. Dictionary that includes DNS information for nameservers, -// domain, search domains and options. +// +// domain, search domains and options. type Result struct { Interfaces map[string]*Config DNS []types.DNS diff --git a/vendor/github.com/containerd/go-cni/testutils.go b/vendor/github.com/containerd/go-cni/testutils.go index d9453c8d983d9..0807e209770ed 100644 --- a/vendor/github.com/containerd/go-cni/testutils.go +++ b/vendor/github.com/containerd/go-cni/testutils.go @@ -18,14 +18,13 @@ package cni import ( "fmt" - "io/ioutil" "os" "path" "testing" ) func makeTmpDir(prefix string) (string, error) { - tmpDir, err := ioutil.TempDir(os.TempDir(), prefix) + tmpDir, err := os.MkdirTemp("", prefix) if err != nil { return "", err } diff --git a/vendor/github.com/containerd/go-cni/types.go b/vendor/github.com/containerd/go-cni/types.go index 0b7db1ee0a637..18616c05b9e04 100644 --- a/vendor/github.com/containerd/go-cni/types.go +++ b/vendor/github.com/containerd/go-cni/types.go @@ -17,12 +17,9 @@ package cni const ( - CNIPluginName = "cni" - DefaultNetDir = "/etc/cni/net.d" - DefaultCNIDir = "/opt/cni/bin" - DefaultMaxConfNum = 1 - VendorCNIDirTemplate = "%s/opt/%s/bin" - DefaultPrefix = "eth" + CNIPluginName = "cni" + DefaultMaxConfNum = 1 + DefaultPrefix = "eth" ) type config struct { diff --git a/vendor/github.com/containerd/go-cni/types_others.go b/vendor/github.com/containerd/go-cni/types_others.go new file mode 100644 index 0000000000000..4550ca575e32b --- /dev/null +++ b/vendor/github.com/containerd/go-cni/types_others.go @@ -0,0 +1,25 @@ +//go:build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cni + +const ( + DefaultNetDir = "/etc/cni/net.d" + DefaultCNIDir = "/opt/cni/bin" + VendorCNIDirTemplate = "%s/opt/%s/bin" +) diff --git a/vendor/github.com/containerd/go-cni/types_windows.go b/vendor/github.com/containerd/go-cni/types_windows.go new file mode 100644 index 0000000000000..5e174e0613de8 --- /dev/null +++ b/vendor/github.com/containerd/go-cni/types_windows.go @@ -0,0 +1,24 @@ +//go:build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package cni + +const ( + DefaultNetDir = "C:\\Program Files\\containerd\\cni\\conf" + DefaultCNIDir = "C:\\Program Files\\containerd\\cni\\bin" +) diff --git a/vendor/github.com/containerd/ttrpc/.gitattributes b/vendor/github.com/containerd/ttrpc/.gitattributes new file mode 100644 index 0000000000000..d207b1802b204 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/.gitattributes @@ -0,0 +1 @@ +*.go text eol=lf diff --git a/vendor/github.com/containerd/ttrpc/.gitignore b/vendor/github.com/containerd/ttrpc/.gitignore index ea58090bd21e1..88ceb2764bd14 100644 --- a/vendor/github.com/containerd/ttrpc/.gitignore +++ b/vendor/github.com/containerd/ttrpc/.gitignore @@ -1,4 +1,5 @@ # Binaries for programs and plugins +/bin/ *.exe *.dll *.so @@ -9,3 +10,4 @@ # Output of the go coverage tool, specifically when used with LiteIDE *.out +coverage.txt diff --git a/vendor/github.com/containerd/ttrpc/.golangci.yml b/vendor/github.com/containerd/ttrpc/.golangci.yml new file mode 100644 index 0000000000000..c8be4980c2bd9 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/.golangci.yml @@ -0,0 +1,54 @@ +linters: + enable: + - structcheck + - varcheck + - staticcheck + - unconvert + - gofmt + - goimports + - revive + - ineffassign + - vet + - unused + - misspell + disable: + - errcheck + +linters-settings: + revive: + ignore-generated-headers: true + rules: + - name: blank-imports + - name: context-as-argument + - name: context-keys-type + - name: dot-imports + - name: error-return + - name: error-strings + - name: error-naming + - name: exported + - name: if-return + - name: increment-decrement + - name: var-naming + arguments: [["UID", "GID"], []] + - name: var-declaration + - name: package-comments + - name: range + - name: receiver-naming + - name: time-naming + - name: unexported-return + - name: indent-error-flow + - name: errorf + - name: empty-block + - name: superfluous-else + - name: unused-parameter + - name: unreachable-code + - name: redefines-builtin-id + +issues: + include: + - EXC0002 + +run: + timeout: 8m + skip-dirs: + - example diff --git a/vendor/github.com/containerd/ttrpc/Makefile b/vendor/github.com/containerd/ttrpc/Makefile new file mode 100644 index 0000000000000..474194239d8a6 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/Makefile @@ -0,0 +1,180 @@ +# Copyright The containerd Authors. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Go command to use for build +GO ?= go +INSTALL ?= install + +# Root directory of the project (absolute path). +ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST)))) + +WHALE = "🇩" +ONI = "👹" + +# Project binaries. +COMMANDS=protoc-gen-go-ttrpc protoc-gen-gogottrpc + +ifdef BUILDTAGS + GO_BUILDTAGS = ${BUILDTAGS} +endif +GO_BUILDTAGS ?= +GO_TAGS=$(if $(GO_BUILDTAGS),-tags "$(strip $(GO_BUILDTAGS))",) + +# Project packages. +PACKAGES=$(shell $(GO) list ${GO_TAGS} ./... | grep -v /example) +TESTPACKAGES=$(shell $(GO) list ${GO_TAGS} ./... 
| grep -v /cmd | grep -v /integration | grep -v /example) +BINPACKAGES=$(addprefix ./cmd/,$(COMMANDS)) + +#Replaces ":" (*nix), ";" (windows) with newline for easy parsing +GOPATHS=$(shell echo ${GOPATH} | tr ":" "\n" | tr ";" "\n") + +TESTFLAGS_RACE= +GO_BUILD_FLAGS= +# See Golang issue re: '-trimpath': https://github.com/golang/go/issues/13809 +GO_GCFLAGS=$(shell \ + set -- ${GOPATHS}; \ + echo "-gcflags=-trimpath=$${1}/src"; \ + ) + +BINARIES=$(addprefix bin/,$(COMMANDS)) + +# Flags passed to `go test` +TESTFLAGS ?= $(TESTFLAGS_RACE) $(EXTRA_TESTFLAGS) +TESTFLAGS_PARALLEL ?= 8 + +# Use this to replace `go test` with, for instance, `gotestsum` +GOTEST ?= $(GO) test + +.PHONY: clean all AUTHORS build binaries test integration generate protos check-protos coverage ci check help install vendor install-protobuf install-protobuild +.DEFAULT: default + +# Forcibly set the default goal to all, in case an include above brought in a rule definition. +.DEFAULT_GOAL := all + +all: binaries + +check: proto-fmt ## run all linters + @echo "$(WHALE) $@" + GOGC=75 golangci-lint run + +ci: check binaries check-protos coverage # coverage-integration ## to be used by the CI + +AUTHORS: .mailmap .git/HEAD + git log --format='%aN <%aE>' | sort -fu > $@ + +generate: protos + @echo "$(WHALE) $@" + @PATH="${ROOTDIR}/bin:${PATH}" $(GO) generate -x ${PACKAGES} + +protos: bin/protoc-gen-gogottrpc bin/protoc-gen-go-ttrpc ## generate protobuf + @echo "$(WHALE) $@" + @(PATH="${ROOTDIR}/bin:${PATH}" protobuild --quiet ${PACKAGES}) + +check-protos: protos ## check if protobufs needs to be generated again + @echo "$(WHALE) $@" + @test -z "$$(git status --short | grep ".pb.go" | tee /dev/stderr)" || \ + ((git diff | cat) && \ + (echo "$(ONI) please run 'make protos' when making changes to proto files" && false)) + +check-api-descriptors: protos ## check that protobuf changes aren't present. + @echo "$(WHALE) $@" + @test -z "$$(git status --short | grep ".pb.txt" | tee /dev/stderr)" || \ + ((git diff $$(find . -name '*.pb.txt') | cat) && \ + (echo "$(ONI) please run 'make protos' when making changes to proto files and check-in the generated descriptor file changes" && false)) + +proto-fmt: ## check format of proto files + @echo "$(WHALE) $@" + @test -z "$$(find . -name '*.proto' -type f -exec grep -Hn -e "^ " {} \; | tee /dev/stderr)" || \ + (echo "$(ONI) please indent proto files with tabs only" && false) + @test -z "$$(find . -name '*.proto' -type f -exec grep -Hn "Meta meta = " {} \; | grep -v '(gogoproto.nullable) = false' | tee /dev/stderr)" || \ + (echo "$(ONI) meta fields in proto files must have option (gogoproto.nullable) = false" && false) + +build: ## build the go packages + @echo "$(WHALE) $@" + @$(GO) build ${DEBUG_GO_GCFLAGS} ${GO_GCFLAGS} ${GO_BUILD_FLAGS} ${EXTRA_FLAGS} ${PACKAGES} + +test: ## run tests, except integration tests and tests that require root + @echo "$(WHALE) $@" + @$(GOTEST) ${TESTFLAGS} ${TESTPACKAGES} + +integration: ## run integration tests + @echo "$(WHALE) $@" + @cd "${ROOTDIR}/integration" && $(GOTEST) -v ${TESTFLAGS} -parallel ${TESTFLAGS_PARALLEL} . + +benchmark: ## run benchmarks tests + @echo "$(WHALE) $@" + @$(GO) test ${TESTFLAGS} -bench . -run Benchmark + +FORCE: + +define BUILD_BINARY +@echo "$(WHALE) $@" +@$(GO) build ${DEBUG_GO_GCFLAGS} ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@ ${GO_TAGS} ./$< +endef + +# Build a binary from a cmd. 
+bin/%: cmd/% FORCE + $(call BUILD_BINARY) + +binaries: $(BINARIES) ## build binaries + @echo "$(WHALE) $@" + +clean: ## clean up binaries + @echo "$(WHALE) $@" + @rm -f $(BINARIES) + +install: ## install binaries + @echo "$(WHALE) $@ $(BINPACKAGES)" + @$(GO) install $(BINPACKAGES) + +install-protobuf: + @echo "$(WHALE) $@" + @script/install-protobuf + +install-protobuild: + @echo "$(WHALE) $@" + @$(GO) install google.golang.org/protobuf/cmd/protoc-gen-go@v1.28.1 + @$(GO) install github.com/containerd/protobuild@14832ccc41429f5c4f81028e5af08aa233a219cf + +coverage: ## generate coverprofiles from the unit tests, except tests that require root + @echo "$(WHALE) $@" + @rm -f coverage.txt + @$(GO) test -i ${TESTFLAGS} ${TESTPACKAGES} 2> /dev/null + @( for pkg in ${PACKAGES}; do \ + $(GO) test ${TESTFLAGS} \ + -cover \ + -coverprofile=profile.out \ + -covermode=atomic $$pkg || exit; \ + if [ -f profile.out ]; then \ + cat profile.out >> coverage.txt; \ + rm profile.out; \ + fi; \ + done ) + +vendor: ## ensure all the go.mod/go.sum files are up-to-date + @echo "$(WHALE) $@" + @$(GO) mod tidy + @$(GO) mod verify + +verify-vendor: ## verify if all the go.mod/go.sum files are up-to-date + @echo "$(WHALE) $@" + @$(GO) mod tidy + @$(GO) mod verify + @test -z "$$(git status --short | grep "go.sum" | tee /dev/stderr)" || \ + ((git diff | cat) && \ + (echo "$(ONI) make sure to checkin changes after go mod tidy" && false)) + +help: ## this help + @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) | sort diff --git a/vendor/github.com/containerd/ttrpc/PROTOCOL.md b/vendor/github.com/containerd/ttrpc/PROTOCOL.md new file mode 100644 index 0000000000000..12b43f6bd6e37 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/PROTOCOL.md @@ -0,0 +1,240 @@ +# Protocol Specification + +The ttrpc protocol is client/server protocol to support multiple request streams +over a single connection with lightweight framing. The client represents the +process which initiated the underlying connection and the server is the process +which accepted the connection. The protocol is currently defined as +asymmetrical, with clients sending requests and servers sending responses. Both +clients and servers are able to send stream data. The roles are also used in +determining the stream identifiers, with client initiated streams using odd +number identifiers and server initiated using even number. The protocol may be +extended in the future to support server initiated streams, that is not +supported in the latest version. + +## Purpose + +The ttrpc protocol is designed to be lightweight and optimized for low latency +and reliable connections between processes on the same host. The protocol does +not include features for handling unreliable connections such as handshakes, +resets, pings, or flow control. The protocol is designed to make low-overhead +implementations as simple as possible. It is not intended as a suitable +replacement for HTTP2/3 over the network. + +## Message Frame + +Each Message Frame consists of a 10-byte message header followed +by message data. The data length and stream ID are both big-endian +4-byte unsigned integers. The message type is an unsigned 1-byte +integer. The flags are also an unsigned 1-byte integer and +use is defined by the message type. 
+ + +---------------------------------------------------------------+ + | Data Length (32) | + +---------------------------------------------------------------+ + | Stream ID (32) | + +---------------+-----------------------------------------------+ + | Msg Type (8) | + +---------------+ + | Flags (8) | + +---------------+-----------------------------------------------+ + | Data (*) | + +---------------------------------------------------------------+ + +The Data Length field represents the number of bytes in the Data field. The +total frame size will always be Data Length + 10 bytes. The maximum data length +is 4MB and any larger size should be rejected. Due to the maximum data size +being less than 16MB, the first frame byte should always be zero. This first +byte should be considered reserved for future use. + +The Stream ID must be odd for client initiated streams and even for server +initiated streams. Server initiated streams are not currently supported. + +## Mesage Types + +| Message Type | Name | Description | +|--------------|----------|----------------------------------| +| 0x01 | Request | Initiates stream | +| 0x02 | Response | Final stream data and terminates | +| 0x03 | Data | Stream data | + +### Request + +The request message is used to initiate stream and send along request data for +properly routing and handling the stream. The stream may indicate unary without +any inbound or outbound stream data with only a response is expected on the +stream. The request may also indicate the stream is still open for more data and +no response is expected until data is finished. If the remote indicates the +stream is closed, the request may be considered non-unary but without anymore +stream data sent. In the case of `remote closed`, the remote still expects to +receive a response or stream data. For compatibility with non streaming clients, +a request with empty flags indicates a unary request. + +#### Request Flags + +| Flag | Name | Description | +|------|-----------------|--------------------------------------------------| +| 0x01 | `remote closed` | Non-unary, but no more data expected from remote | +| 0x02 | `remote open` | Non-unary, remote is still sending data | + +### Response + +The response message is used to end a stream with data, an empty response, or +an error. A response message is the only expected message after a unary request. +A non-unary request does not require a response message if the server is sending +back stream data. A non-unary stream may return a single response message but no +other stream data may follow. + +#### Response Flags + +No response flags are defined at this time, flags should be empty. + +### Data + +The data message is used to send data on an already initialized stream. Either +client or server may send data. A data message is not allowed on a unary stream. +A data message should not be sent after indicating `remote closed` to the peer. +The last data message on a stream must set the `remote closed` flag. + +The `no data` flag is used to indicate that the data message does not include +any data. This is normally used with the `remote closed` flag to indicate the +stream is now closed without transmitting any data. Since ttrpc normally +transmits a single object per message, a zero length data message may be +interpreted as an empty object. For example, transmitting the number zero as a +protobuf message ends up with a data length of zero, but the message is still +considered data and should be processed. 
+ +#### Data Flags + +| Flag | Name | Description | +|------|-----------------|-----------------------------------| +| 0x01 | `remote closed` | No more data expected from remote | +| 0x04 | `no data` | This message does not have data | + +## Streaming + +All ttrpc requests use streams to transfer data. Unary streams will only have +two messages sent per stream, a request from a client and a response from the +server. Non-unary streams, however, may send any numbers of messages from the +client and the server. This makes stream management more complicated than unary +streams since both client and server need to track additional state. To keep +this management as simple as possible, ttrpc minimizes the number of states and +uses two flags instead of control frames. Each stream has two states while a +stream is still alive: `local closed` and `remote closed`. Each peer considers +local and remote from their own perspective and sets flags from the other peer's +perspective. For example, if a client sends a data frame with the +`remote closed` flag, that is indicating that the client is now `local closed` +and the server will be `remote closed`. A unary operation does not need to send +these flags since each received message always indicates `remote closed`. Once a +peer is both `local closed` and `remote closed`, the stream is considered +finished and may be cleaned up. + +Due to the asymmetric nature of the current protocol, a client should +always be in the `local closed` state before `remote closed` and a server should +always be in the `remote closed` state before `local closed`. This happens +because the client is always initiating requests and a client always expects a +final response back from a server to indicate the initiated request has been +fulfilled. This may mean server sends a final empty response to finish a stream +even after it has already completed sending data before the client. 
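+
+As a rough illustration of that bookkeeping (and only that; ttrpc's real stream
+tracking lives in the library itself), a peer can model each live stream with
+two booleans driven by the `remote closed` flag. The type and names below are
+invented for the example.
+
+```go
+package main
+
+import "fmt"
+
+const flagRemoteClosed = 0x01 // value from the flag tables above
+
+// streamState models one live stream from a single peer's perspective.
+type streamState struct {
+	localClosed  bool // this peer will send no more data
+	remoteClosed bool // the other peer has said it will send no more data
+}
+
+// onSend records the effect of a frame this peer sends.
+func (s *streamState) onSend(flags uint8) {
+	// Sending "remote closed" tells the peer we are done, so we are now local closed.
+	if flags&flagRemoteClosed != 0 {
+		s.localClosed = true
+	}
+}
+
+// onReceive records the effect of a frame this peer receives.
+func (s *streamState) onReceive(flags uint8) {
+	if flags&flagRemoteClosed != 0 {
+		s.remoteClosed = true
+	}
+}
+
+// finished reports whether the stream is done on both sides and may be cleaned up.
+func (s *streamState) finished() bool { return s.localClosed && s.remoteClosed }
+
+func main() {
+	var st streamState
+	st.onSend(flagRemoteClosed)    // client sends its last data frame
+	st.onReceive(flagRemoteClosed) // server's final frame arrives
+	fmt.Println(st.finished())     // true: both sides are closed
+}
+```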
+ +### Unary State Diagram + + +--------+ +--------+ + | Client | | Server | + +---+----+ +----+---+ + | +---------+ | + local >---------------+ Request +--------------------> remote + closed | +---------+ | closed + | | + | +----------+ | + finished <--------------+ Response +--------------------< finished + | +----------+ | + | | + +### Non-Unary State Diagrams + +RC: `remote closed` flag +RO: `remote open` flag + + +--------+ +--------+ + | Client | | Server | + +---+----+ +----+---+ + | +--------------+ | + >-------------+ Request [RO] +-----------------> + | +--------------+ | + | | + | +------+ | + >-----------------+ Data +---------------------> + | +------+ | + | | + | +-----------+ | + local >---------------+ Data [RC] +------------------> remote + closed | +-----------+ | closed + | | + | +----------+ | + finished <--------------+ Response +--------------------< finished + | +----------+ | + | | + + +--------+ +--------+ + | Client | | Server | + +---+----+ +----+---+ + | +--------------+ | + local >-------------+ Request [RC] +-----------------> remote + closed | +--------------+ | closed + | | + | +------+ | + <-----------------+ Data +---------------------< + | +------+ | + | | + | +-----------+ | + finished <---------------+ Data [RC] +------------------< finished + | +-----------+ | + | | + + +--------+ +--------+ + | Client | | Server | + +---+----+ +----+---+ + | +--------------+ | + >-------------+ Request [RO] +-----------------> + | +--------------+ | + | | + | +------+ | + >-----------------+ Data +---------------------> + | +------+ | + | | + | +------+ | + <-----------------+ Data +---------------------< + | +------+ | + | | + | +------+ | + >-----------------+ Data +---------------------> + | +------+ | + | | + | +-----------+ | + local >---------------+ Data [RC] +------------------> remote + closed | +-----------+ | closed + | | + | +------+ | + <-----------------+ Data +---------------------< + | +------+ | + | | + | +-----------+ | + finished <---------------+ Data [RC] +------------------< finished + | +-----------+ | + | | + +## RPC + +While this protocol is defined primarily to support Remote Procedure Calls, the +protocol does not define the request and response types beyond the messages +defined in the protocol. The implementation provides a default protobuf +definition of request and response which may be used for cross language rpc. +All implementations should at least define a request type which support +routing by procedure name and a response type which supports call status. + +## Version History + +| Version | Features | +|---------|---------------------| +| 1.0 | Unary requests only | +| 1.2 | Streaming support | diff --git a/vendor/github.com/containerd/ttrpc/Protobuild.toml b/vendor/github.com/containerd/ttrpc/Protobuild.toml new file mode 100644 index 0000000000000..0f6ccbd1e81a0 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/Protobuild.toml @@ -0,0 +1,28 @@ +version = "2" +generators = ["go"] + +# Control protoc include paths. Below are usually some good defaults, but feel +# free to try it without them if it works for your project. +[includes] + # Include paths that will be added before all others. Typically, you want to + # treat the root of the project as an include, but this may not be necessary. + before = ["."] + + # Paths that will be added untouched to the end of the includes. We use + # `/usr/local/include` to pickup the common install location of protobuf. + # This is the default. 
+ after = ["/usr/local/include"] + +# This section maps protobuf imports to Go packages. These will become +# `-M` directives in the call to the go protobuf generator. +[packages] + "google/protobuf/any.proto" = "github.com/gogo/protobuf/types" + "proto/status.proto" = "google.golang.org/genproto/googleapis/rpc/status" + +[[overrides]] +# enable ttrpc and disable fieldpath and grpc for the shim +prefixes = ["github.com/containerd/ttrpc/integration/streaming"] +generators = ["go", "go-ttrpc"] + +[overrides.parameters.go-ttrpc] +prefix = "TTRPC" diff --git a/vendor/github.com/containerd/ttrpc/README.md b/vendor/github.com/containerd/ttrpc/README.md index 547a1297df7de..675a5179ef83d 100644 --- a/vendor/github.com/containerd/ttrpc/README.md +++ b/vendor/github.com/containerd/ttrpc/README.md @@ -1,7 +1,6 @@ # ttrpc [![Build Status](https://github.com/containerd/ttrpc/workflows/CI/badge.svg)](https://github.com/containerd/ttrpc/actions?query=workflow%3ACI) -[![codecov](https://codecov.io/gh/containerd/ttrpc/branch/main/graph/badge.svg)](https://codecov.io/gh/containerd/ttrpc) GRPC for low-memory environments. @@ -20,13 +19,17 @@ Please note that while this project supports generating either end of the protocol, the generated service definitions will be incompatible with regular GRPC services, as they do not speak the same protocol. +# Protocol + +See the [protocol specification](./PROTOCOL.md). + # Usage Create a gogo vanity binary (see [`cmd/protoc-gen-gogottrpc/main.go`](cmd/protoc-gen-gogottrpc/main.go) for an example with the ttrpc plugin enabled. -It's recommended to use [`protobuild`](https://github.com//stevvooe/protobuild) +It's recommended to use [`protobuild`](https://github.com/containerd/protobuild) to build the protobufs for this project, but this will work with protoc directly, if required. @@ -37,13 +40,11 @@ directly, if required. - The client and server interface are identical whereas in GRPC there is a client and server interface that are different. - The Go stdlib context package is used instead. -- No support for streams yet. # Status TODO: -- [ ] Document protocol layout - [ ] Add testing under concurrent load to ensure - [ ] Verify connection error handling diff --git a/vendor/github.com/containerd/ttrpc/channel.go b/vendor/github.com/containerd/ttrpc/channel.go index 81116a5e23fc4..feafd9a6b57c0 100644 --- a/vendor/github.com/containerd/ttrpc/channel.go +++ b/vendor/github.com/containerd/ttrpc/channel.go @@ -38,6 +38,26 @@ type messageType uint8 const ( messageTypeRequest messageType = 0x1 messageTypeResponse messageType = 0x2 + messageTypeData messageType = 0x3 +) + +func (mt messageType) String() string { + switch mt { + case messageTypeRequest: + return "request" + case messageTypeResponse: + return "response" + case messageTypeData: + return "data" + default: + return "unknown" + } +} + +const ( + flagRemoteClosed uint8 = 0x1 + flagRemoteOpen uint8 = 0x2 + flagNoData uint8 = 0x4 ) // messageHeader represents the fixed-length message header of 10 bytes sent @@ -46,7 +66,7 @@ type messageHeader struct { Length uint32 // length excluding this header. b[:4] StreamID uint32 // identifies which request stream message is a part of. 
b[4:8] Type messageType // message type b[8] - Flags uint8 // reserved b[9] + Flags uint8 // type specific flags b[9] } func readMessageHeader(p []byte, r io.Reader) (messageHeader, error) { @@ -111,22 +131,31 @@ func (ch *channel) recv() (messageHeader, []byte, error) { return mh, nil, status.Errorf(codes.ResourceExhausted, "message length %v exceed maximum message size of %v", mh.Length, messageLengthMax) } - p := ch.getmbuf(int(mh.Length)) - if _, err := io.ReadFull(ch.br, p); err != nil { - return messageHeader{}, nil, fmt.Errorf("failed reading message: %w", err) + var p []byte + if mh.Length > 0 { + p = ch.getmbuf(int(mh.Length)) + if _, err := io.ReadFull(ch.br, p); err != nil { + return messageHeader{}, nil, fmt.Errorf("failed reading message: %w", err) + } } return mh, p, nil } -func (ch *channel) send(streamID uint32, t messageType, p []byte) error { - if err := writeMessageHeader(ch.bw, ch.hwbuf[:], messageHeader{Length: uint32(len(p)), StreamID: streamID, Type: t}); err != nil { +func (ch *channel) send(streamID uint32, t messageType, flags uint8, p []byte) error { + // TODO: Error on send rather than on recv + //if len(p) > messageLengthMax { + // return status.Errorf(codes.InvalidArgument, "refusing to send, message length %v exceed maximum message size of %v", len(p), messageLengthMax) + //} + if err := writeMessageHeader(ch.bw, ch.hwbuf[:], messageHeader{Length: uint32(len(p)), StreamID: streamID, Type: t, Flags: flags}); err != nil { return err } - _, err := ch.bw.Write(p) - if err != nil { - return err + if len(p) > 0 { + _, err := ch.bw.Write(p) + if err != nil { + return err + } } return ch.bw.Flush() diff --git a/vendor/github.com/containerd/ttrpc/client.go b/vendor/github.com/containerd/ttrpc/client.go index 26c3dd2a98192..0abc7025d6302 100644 --- a/vendor/github.com/containerd/ttrpc/client.go +++ b/vendor/github.com/containerd/ttrpc/client.go @@ -19,30 +19,30 @@ package ttrpc import ( "context" "errors" + "fmt" "io" "net" - "os" "strings" "sync" "syscall" "time" - "github.com/gogo/protobuf/proto" "github.com/sirupsen/logrus" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" ) -// ErrClosed is returned by client methods when the underlying connection is -// closed. 
-var ErrClosed = errors.New("ttrpc: closed") - // Client for a ttrpc server type Client struct { codec codec conn net.Conn channel *channel - calls chan *callRequest + + streamLock sync.RWMutex + streams map[streamID]*stream + nextStreamID streamID + sendLock sync.Mutex ctx context.Context closed func() @@ -51,8 +51,6 @@ type Client struct { userCloseFunc func() userCloseWaitCh chan struct{} - errOnce sync.Once - err error interceptor UnaryClientInterceptor } @@ -73,13 +71,16 @@ func WithUnaryClientInterceptor(i UnaryClientInterceptor) ClientOpts { } } +// NewClient creates a new ttrpc client using the given connection func NewClient(conn net.Conn, opts ...ClientOpts) *Client { ctx, cancel := context.WithCancel(context.Background()) + channel := newChannel(conn) c := &Client{ codec: codec{}, conn: conn, - channel: newChannel(conn), - calls: make(chan *callRequest), + channel: channel, + streams: make(map[streamID]*stream), + nextStreamID: 1, closed: cancel, ctx: ctx, userCloseFunc: func() {}, @@ -95,13 +96,13 @@ func NewClient(conn net.Conn, opts ...ClientOpts) *Client { return c } -type callRequest struct { - ctx context.Context - req *Request - resp *Response // response will be written back here - errs chan error // error written here on completion +func (c *Client) send(sid uint32, mt messageType, flags uint8, b []byte) error { + c.sendLock.Lock() + defer c.sendLock.Unlock() + return c.channel.send(sid, mt, flags, b) } +// Call makes a unary request and returns with response func (c *Client) Call(ctx context.Context, service, method string, req, resp interface{}) error { payload, err := c.codec.Marshal(req) if err != nil { @@ -113,6 +114,7 @@ func (c *Client) Call(ctx context.Context, service, method string, req, resp int Service: service, Method: method, Payload: payload, + // TODO: metadata from context } cresp = &Response{} @@ -123,7 +125,7 @@ func (c *Client) Call(ctx context.Context, service, method string, req, resp int } if dl, ok := ctx.Deadline(); ok { - creq.TimeoutNano = dl.Sub(time.Now()).Nanoseconds() + creq.TimeoutNano = time.Until(dl).Nanoseconds() } info := &UnaryClientInfo{ @@ -143,36 +145,137 @@ func (c *Client) Call(ctx context.Context, service, method string, req, resp int return nil } -func (c *Client) dispatch(ctx context.Context, req *Request, resp *Response) error { - errs := make(chan error, 1) - call := &callRequest{ - ctx: ctx, - req: req, - resp: resp, - errs: errs, +// StreamDesc describes the stream properties, whether the stream has +// a streaming client, a streaming server, or both +type StreamDesc struct { + StreamingClient bool + StreamingServer bool +} + +// ClientStream is used to send or recv messages on the underlying stream +type ClientStream interface { + CloseSend() error + SendMsg(m interface{}) error + RecvMsg(m interface{}) error +} + +type clientStream struct { + ctx context.Context + s *stream + c *Client + desc *StreamDesc + localClosed bool + remoteClosed bool +} + +func (cs *clientStream) CloseSend() error { + if !cs.desc.StreamingClient { + return fmt.Errorf("%w: cannot close non-streaming client", ErrProtocol) } + if cs.localClosed { + return ErrStreamClosed + } + err := cs.s.send(messageTypeData, flagRemoteClosed|flagNoData, nil) + if err != nil { + return filterCloseErr(err) + } + cs.localClosed = true + return nil +} - select { - case <-ctx.Done(): - return ctx.Err() - case c.calls <- call: - case <-c.ctx.Done(): - return c.error() +func (cs *clientStream) SendMsg(m interface{}) error { + if !cs.desc.StreamingClient { + return 
fmt.Errorf("%w: cannot send data from non-streaming client", ErrProtocol) + } + if cs.localClosed { + return ErrStreamClosed } - select { - case <-ctx.Done(): - return ctx.Err() - case err := <-errs: + var ( + payload []byte + err error + ) + if m != nil { + payload, err = cs.c.codec.Marshal(m) + if err != nil { + return err + } + } + + err = cs.s.send(messageTypeData, 0, payload) + if err != nil { return filterCloseErr(err) - case <-c.ctx.Done(): - return c.error() } + + return nil } +func (cs *clientStream) RecvMsg(m interface{}) error { + if cs.remoteClosed { + return io.EOF + } + select { + case <-cs.ctx.Done(): + return cs.ctx.Err() + case msg, ok := <-cs.s.recv: + if !ok { + return cs.s.recvErr + } + + if msg.header.Type == messageTypeResponse { + resp := &Response{} + err := proto.Unmarshal(msg.payload[:msg.header.Length], resp) + // return the payload buffer for reuse + cs.c.channel.putmbuf(msg.payload) + if err != nil { + return err + } + + if err := cs.c.codec.Unmarshal(resp.Payload, m); err != nil { + return err + } + + if resp.Status != nil && resp.Status.Code != int32(codes.OK) { + return status.ErrorProto(resp.Status) + } + + cs.c.deleteStream(cs.s) + cs.remoteClosed = true + + return nil + } else if msg.header.Type == messageTypeData { + if !cs.desc.StreamingServer { + cs.c.deleteStream(cs.s) + cs.remoteClosed = true + return fmt.Errorf("received data from non-streaming server: %w", ErrProtocol) + } + if msg.header.Flags&flagRemoteClosed == flagRemoteClosed { + cs.c.deleteStream(cs.s) + cs.remoteClosed = true + + if msg.header.Flags&flagNoData == flagNoData { + return io.EOF + } + } + + err := cs.c.codec.Unmarshal(msg.payload[:msg.header.Length], m) + cs.c.channel.putmbuf(msg.payload) + if err != nil { + return err + } + return nil + } + + return fmt.Errorf("unexpected %q message received: %w", msg.header.Type, ErrProtocol) + } +} + +// Close closes the ttrpc connection and underlying connection func (c *Client) Close() error { c.closeOnce.Do(func() { c.closed() + + c.conn.Close() }) return nil } @@ -188,194 +291,105 @@ func (c *Client) UserOnCloseWait(ctx context.Context) error { } } -type message struct { - messageHeader - p []byte - err error -} +func (c *Client) run() { + err := c.receiveLoop() + c.Close() + c.cleanupStreams(err) -// callMap provides access to a map of active calls, guarded by a mutex. -type callMap struct { - m sync.Mutex - activeCalls map[uint32]*callRequest - closeErr error + c.userCloseFunc() + close(c.userCloseWaitCh) } -// newCallMap returns a new callMap with an empty set of active calls. -func newCallMap() *callMap { - return &callMap{ - activeCalls: make(map[uint32]*callRequest), - } -} +func (c *Client) receiveLoop() error { + for { + select { + case <-c.ctx.Done(): + return ErrClosed + default: + var ( + msg = &streamMessage{} + err error + ) + + msg.header, msg.payload, err = c.channel.recv() + if err != nil { + _, ok := status.FromError(err) + if !ok { + // treat all errors that are not an rpc status as terminal. + // all others poison the connection. + return filterCloseErr(err) + } + } + sid := streamID(msg.header.StreamID) + s := c.getStream(sid) + if s == nil { + logrus.WithField("stream", sid).Errorf("ttrpc: received message on inactive stream") + continue + } -// set adds a call entry to the map with the given streamID key. 
-func (cm *callMap) set(streamID uint32, cr *callRequest) error { - cm.m.Lock() - defer cm.m.Unlock() - if cm.closeErr != nil { - return cm.closeErr + if err != nil { + s.closeWithError(err) + } else { + if err := s.receive(c.ctx, msg); err != nil { + logrus.WithError(err).WithField("stream", sid).Errorf("ttrpc: failed to handle message") + } + } + } } - cm.activeCalls[streamID] = cr - return nil } -// get looks up the call entry for the given streamID key, then removes it -// from the map and returns it. -func (cm *callMap) get(streamID uint32) (cr *callRequest, ok bool, err error) { - cm.m.Lock() - defer cm.m.Unlock() - if cm.closeErr != nil { - return nil, false, cm.closeErr - } - cr, ok = cm.activeCalls[streamID] - if ok { - delete(cm.activeCalls, streamID) - } - return -} +// createStream creates a new stream and registers it with the client +// Introduce stream types for multiple or single response +func (c *Client) createStream(flags uint8, b []byte) (*stream, error) { + c.streamLock.Lock() -// abort sends the given error to each active call, and clears the map. -// Once abort has been called, any subsequent calls to the callMap will return the error passed to abort. -func (cm *callMap) abort(err error) error { - cm.m.Lock() - defer cm.m.Unlock() - if cm.closeErr != nil { - return cm.closeErr - } - for streamID, call := range cm.activeCalls { - call.errs <- err - delete(cm.activeCalls, streamID) + // Check if closed since lock acquired to prevent adding + // anything after cleanup completes + select { + case <-c.ctx.Done(): + c.streamLock.Unlock() + return nil, ErrClosed + default: } - cm.closeErr = err - return nil -} - -func (c *Client) run() { - var ( - waiters = newCallMap() - receiverDone = make(chan struct{}) - ) - // Sender goroutine - // Receives calls from dispatch, adds them to the set of active calls, and sends them - // to the server. - go func() { - var streamID uint32 = 1 - for { - select { - case <-c.ctx.Done(): - return - case call := <-c.calls: - id := streamID - streamID += 2 // enforce odd client initiated request ids - if err := waiters.set(id, call); err != nil { - call.errs <- err // errs is buffered so should not block. - continue - } - if err := c.send(id, messageTypeRequest, call.req); err != nil { - call.errs <- err // errs is buffered so should not block. - waiters.get(id) // remove from waiters set - } - } - } - }() - - // Receiver goroutine - // Receives responses from the server, looks up the call info in the set of active calls, - // and notifies the caller of the response. - go func() { - defer close(receiverDone) - for { - select { - case <-c.ctx.Done(): - c.setError(c.ctx.Err()) - return - default: - mh, p, err := c.channel.recv() - if err != nil { - _, ok := status.FromError(err) - if !ok { - // treat all errors that are not an rpc status as terminal. - // all others poison the connection. 
- c.setError(filterCloseErr(err)) - return - } - } - msg := &message{ - messageHeader: mh, - p: p[:mh.Length], - err: err, - } - call, ok, err := waiters.get(mh.StreamID) - if err != nil { - logrus.Errorf("ttrpc: failed to look up active call: %s", err) - continue - } - if !ok { - logrus.Errorf("ttrpc: received message for unknown channel %v", mh.StreamID) - continue - } - call.errs <- c.recv(call.resp, msg) - } - } - }() + // Stream ID should be allocated at same time + s := newStream(c.nextStreamID, c) + c.streams[s.id] = s + c.nextStreamID = c.nextStreamID + 2 - defer func() { - c.conn.Close() - c.userCloseFunc() - close(c.userCloseWaitCh) - }() + c.sendLock.Lock() + defer c.sendLock.Unlock() + c.streamLock.Unlock() - for { - select { - case <-receiverDone: - // The receiver has exited. - // don't return out, let the close of the context trigger the abort of waiters - c.Close() - case <-c.ctx.Done(): - // Abort all active calls. This will also prevent any new calls from being added - // to waiters. - waiters.abort(c.error()) - return - } + if err := c.channel.send(uint32(s.id), messageTypeRequest, flags, b); err != nil { + return s, filterCloseErr(err) } -} -func (c *Client) error() error { - c.errOnce.Do(func() { - if c.err == nil { - c.err = ErrClosed - } - }) - return c.err + return s, nil } -func (c *Client) setError(err error) { - c.errOnce.Do(func() { - c.err = err - }) +func (c *Client) deleteStream(s *stream) { + c.streamLock.Lock() + delete(c.streams, s.id) + c.streamLock.Unlock() + s.closeWithError(nil) } -func (c *Client) send(streamID uint32, mtype messageType, msg interface{}) error { - p, err := c.codec.Marshal(msg) - if err != nil { - return err - } - - return c.channel.send(streamID, mtype, p) +func (c *Client) getStream(sid streamID) *stream { + c.streamLock.RLock() + s := c.streams[sid] + c.streamLock.RUnlock() + return s } -func (c *Client) recv(resp *Response, msg *message) error { - if msg.err != nil { - return msg.err - } +func (c *Client) cleanupStreams(err error) { + c.streamLock.Lock() + defer c.streamLock.Unlock() - if msg.Type != messageTypeResponse { - return errors.New("unknown message type received") + for sid, s := range c.streams { + s.closeWithError(err) + delete(c.streams, sid) } - - defer c.channel.putmbuf(msg.p) - return proto.Unmarshal(msg.p, resp) } // filterCloseErr rewrites EOF and EPIPE errors to ErrClosed. Use when @@ -388,6 +402,8 @@ func filterCloseErr(err error) error { return nil case err == io.EOF: return ErrClosed + case errors.Is(err, io.ErrClosedPipe): + return ErrClosed case errors.Is(err, io.EOF): return ErrClosed case strings.Contains(err.Error(), "use of closed network connection"): @@ -395,11 +411,9 @@ func filterCloseErr(err error) error { default: // if we have an epipe on a write or econnreset on a read , we cast to errclosed var oerr *net.OpError - if errors.As(err, &oerr) && (oerr.Op == "write" || oerr.Op == "read") { - serr, sok := oerr.Err.(*os.SyscallError) - if sok && ((serr.Err == syscall.EPIPE && oerr.Op == "write") || - (serr.Err == syscall.ECONNRESET && oerr.Op == "read")) { - + if errors.As(err, &oerr) { + if (oerr.Op == "write" && errors.Is(err, syscall.EPIPE)) || + (oerr.Op == "read" && errors.Is(err, syscall.ECONNRESET)) { return ErrClosed } } @@ -407,3 +421,81 @@ func filterCloseErr(err error) error { return err } + +// NewStream creates a new stream with the given stream descriptor to the +// specified service and method. If not a streaming client, the request object +// may be provided. 
+func (c *Client) NewStream(ctx context.Context, desc *StreamDesc, service, method string, req interface{}) (ClientStream, error) { + var payload []byte + if req != nil { + var err error + payload, err = c.codec.Marshal(req) + if err != nil { + return nil, err + } + } + + request := &Request{ + Service: service, + Method: method, + Payload: payload, + // TODO: metadata from context + } + p, err := c.codec.Marshal(request) + if err != nil { + return nil, err + } + + var flags uint8 + if desc.StreamingClient { + flags = flagRemoteOpen + } else { + flags = flagRemoteClosed + } + s, err := c.createStream(flags, p) + if err != nil { + return nil, err + } + + return &clientStream{ + ctx: ctx, + s: s, + c: c, + desc: desc, + }, nil +} + +func (c *Client) dispatch(ctx context.Context, req *Request, resp *Response) error { + p, err := c.codec.Marshal(req) + if err != nil { + return err + } + + s, err := c.createStream(0, p) + if err != nil { + return err + } + defer c.deleteStream(s) + + select { + case <-ctx.Done(): + return ctx.Err() + case <-c.ctx.Done(): + return ErrClosed + case msg, ok := <-s.recv: + if !ok { + return s.recvErr + } + + if msg.header.Type == messageTypeResponse { + err = proto.Unmarshal(msg.payload[:msg.header.Length], resp) + } else { + err = fmt.Errorf("unexpected %q message received: %w", msg.header.Type, ErrProtocol) + } + + // return the payload buffer for reuse + c.channel.putmbuf(msg.payload) + + return err + } +} diff --git a/vendor/github.com/containerd/ttrpc/codec.go b/vendor/github.com/containerd/ttrpc/codec.go index 880634c27e39d..3e82722a4245b 100644 --- a/vendor/github.com/containerd/ttrpc/codec.go +++ b/vendor/github.com/containerd/ttrpc/codec.go @@ -19,7 +19,7 @@ package ttrpc import ( "fmt" - "github.com/gogo/protobuf/proto" + "google.golang.org/protobuf/proto" ) type codec struct{} diff --git a/vendor/github.com/containerd/ttrpc/doc.go b/vendor/github.com/containerd/ttrpc/doc.go new file mode 100644 index 0000000000000..d80cd424cc8be --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/doc.go @@ -0,0 +1,23 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* +package ttrpc defines and implements a low level simple transfer protocol +optimized for low latency and reliable connections between processes on the same +host. The protocol uses simple framing for sending requests, responses, and data +using multiple streams. +*/ +package ttrpc diff --git a/vendor/github.com/containerd/ttrpc/errors.go b/vendor/github.com/containerd/ttrpc/errors.go new file mode 100644 index 0000000000000..ec14b7952bfb6 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/errors.go @@ -0,0 +1,34 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ttrpc + +import "errors" + +var ( + // ErrProtocol is a general error in the handling the protocol. + ErrProtocol = errors.New("protocol error") + + // ErrClosed is returned by client methods when the underlying connection is + // closed. + ErrClosed = errors.New("ttrpc: closed") + + // ErrServerClosed is returned when the Server has closed its connection. + ErrServerClosed = errors.New("ttrpc: server closed") + + // ErrStreamClosed is when the streaming connection is closed. + ErrStreamClosed = errors.New("ttrpc: stream closed") +) diff --git a/vendor/github.com/containerd/ttrpc/handshake.go b/vendor/github.com/containerd/ttrpc/handshake.go index a424b67a4976a..3c6b610d35d42 100644 --- a/vendor/github.com/containerd/ttrpc/handshake.go +++ b/vendor/github.com/containerd/ttrpc/handshake.go @@ -45,6 +45,6 @@ func (fn handshakerFunc) Handshake(ctx context.Context, conn net.Conn) (net.Conn return fn(ctx, conn) } -func noopHandshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) { +func noopHandshake(_ context.Context, conn net.Conn) (net.Conn, interface{}, error) { return conn, nil, nil } diff --git a/vendor/github.com/containerd/ttrpc/interceptor.go b/vendor/github.com/containerd/ttrpc/interceptor.go index c1219dac65f6f..7ff5e9d33f23a 100644 --- a/vendor/github.com/containerd/ttrpc/interceptor.go +++ b/vendor/github.com/containerd/ttrpc/interceptor.go @@ -28,6 +28,13 @@ type UnaryClientInfo struct { FullMethod string } +// StreamServerInfo provides information about the server request +type StreamServerInfo struct { + FullMethod string + StreamingClient bool + StreamingServer bool +} + // Unmarshaler contains the server request data and allows it to be unmarshaled // into a concrete type type Unmarshaler func(interface{}) error @@ -41,10 +48,18 @@ type UnaryServerInterceptor func(context.Context, Unmarshaler, *UnaryServerInfo, // UnaryClientInterceptor specifies the interceptor function for client request/response type UnaryClientInterceptor func(context.Context, *Request, *Response, *UnaryClientInfo, Invoker) error -func defaultServerInterceptor(ctx context.Context, unmarshal Unmarshaler, info *UnaryServerInfo, method Method) (interface{}, error) { +func defaultServerInterceptor(ctx context.Context, unmarshal Unmarshaler, _ *UnaryServerInfo, method Method) (interface{}, error) { return method(ctx, unmarshal) } func defaultClientInterceptor(ctx context.Context, req *Request, resp *Response, _ *UnaryClientInfo, invoker Invoker) error { return invoker(ctx, req, resp) } + +type StreamServerInterceptor func(context.Context, StreamServer, *StreamServerInfo, StreamHandler) (interface{}, error) + +func defaultStreamServerInterceptor(ctx context.Context, ss StreamServer, _ *StreamServerInfo, stream StreamHandler) (interface{}, error) { + return stream(ctx, ss) +} + +type StreamClientInterceptor func(context.Context) diff --git a/vendor/github.com/containerd/ttrpc/request.pb.go b/vendor/github.com/containerd/ttrpc/request.pb.go new file mode 100644 index 0000000000000..3921ae5a356c6 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/request.pb.go @@ -0,0 +1,396 @@ 
+// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.20.1 +// source: github.com/containerd/ttrpc/request.proto + +package ttrpc + +import ( + status "google.golang.org/genproto/googleapis/rpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` + Method string `protobuf:"bytes,2,opt,name=method,proto3" json:"method,omitempty"` + Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` + TimeoutNano int64 `protobuf:"varint,4,opt,name=timeout_nano,json=timeoutNano,proto3" json:"timeout_nano,omitempty"` + Metadata []*KeyValue `protobuf:"bytes,5,rep,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *Request) Reset() { + *x = Request{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Request) ProtoMessage() {} + +func (x *Request) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Request.ProtoReflect.Descriptor instead. 
+func (*Request) Descriptor() ([]byte, []int) { + return file_github_com_containerd_ttrpc_request_proto_rawDescGZIP(), []int{0} +} + +func (x *Request) GetService() string { + if x != nil { + return x.Service + } + return "" +} + +func (x *Request) GetMethod() string { + if x != nil { + return x.Method + } + return "" +} + +func (x *Request) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +func (x *Request) GetTimeoutNano() int64 { + if x != nil { + return x.TimeoutNano + } + return 0 +} + +func (x *Request) GetMetadata() []*KeyValue { + if x != nil { + return x.Metadata + } + return nil +} + +type Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status *status.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` +} + +func (x *Response) Reset() { + *x = Response{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Response) ProtoMessage() {} + +func (x *Response) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Response.ProtoReflect.Descriptor instead. +func (*Response) Descriptor() ([]byte, []int) { + return file_github_com_containerd_ttrpc_request_proto_rawDescGZIP(), []int{1} +} + +func (x *Response) GetStatus() *status.Status { + if x != nil { + return x.Status + } + return nil +} + +func (x *Response) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +type StringList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + List []string `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` +} + +func (x *StringList) Reset() { + *x = StringList{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StringList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringList) ProtoMessage() {} + +func (x *StringList) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StringList.ProtoReflect.Descriptor instead. 
+func (*StringList) Descriptor() ([]byte, []int) { + return file_github_com_containerd_ttrpc_request_proto_rawDescGZIP(), []int{2} +} + +func (x *StringList) GetList() []string { + if x != nil { + return x.List + } + return nil +} + +type KeyValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *KeyValue) Reset() { + *x = KeyValue{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyValue) ProtoMessage() {} + +func (x *KeyValue) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_ttrpc_request_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead. +func (*KeyValue) Descriptor() ([]byte, []int) { + return file_github_com_containerd_ttrpc_request_proto_rawDescGZIP(), []int{3} +} + +func (x *KeyValue) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *KeyValue) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +var File_github_com_containerd_ttrpc_request_proto protoreflect.FileDescriptor + +var file_github_com_containerd_ttrpc_request_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x74, 0x74, 0x72, 0x70, 0x63, 0x2f, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x74, 0x74, 0x72, + 0x70, 0x63, 0x1a, 0x12, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa5, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, + 0x74, 0x68, 0x6f, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x21, + 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x4e, 0x61, 0x6e, + 0x6f, 0x12, 0x2b, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x74, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x4b, 0x65, 0x79, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x45, + 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x07, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 
0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x20, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4c, + 0x69, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x04, 0x6c, 0x69, 0x73, 0x74, 0x22, 0x32, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x1d, 0x5a, 0x1b, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x74, 0x74, 0x72, 0x70, 0x63, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_github_com_containerd_ttrpc_request_proto_rawDescOnce sync.Once + file_github_com_containerd_ttrpc_request_proto_rawDescData = file_github_com_containerd_ttrpc_request_proto_rawDesc +) + +func file_github_com_containerd_ttrpc_request_proto_rawDescGZIP() []byte { + file_github_com_containerd_ttrpc_request_proto_rawDescOnce.Do(func() { + file_github_com_containerd_ttrpc_request_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_ttrpc_request_proto_rawDescData) + }) + return file_github_com_containerd_ttrpc_request_proto_rawDescData +} + +var file_github_com_containerd_ttrpc_request_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_github_com_containerd_ttrpc_request_proto_goTypes = []interface{}{ + (*Request)(nil), // 0: ttrpc.Request + (*Response)(nil), // 1: ttrpc.Response + (*StringList)(nil), // 2: ttrpc.StringList + (*KeyValue)(nil), // 3: ttrpc.KeyValue + (*status.Status)(nil), // 4: Status +} +var file_github_com_containerd_ttrpc_request_proto_depIdxs = []int32{ + 3, // 0: ttrpc.Request.metadata:type_name -> ttrpc.KeyValue + 4, // 1: ttrpc.Response.status:type_name -> Status + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_ttrpc_request_proto_init() } +func file_github_com_containerd_ttrpc_request_proto_init() { + if File_github_com_containerd_ttrpc_request_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_ttrpc_request_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_ttrpc_request_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_ttrpc_request_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StringList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_ttrpc_request_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + 
return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_ttrpc_request_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_ttrpc_request_proto_goTypes, + DependencyIndexes: file_github_com_containerd_ttrpc_request_proto_depIdxs, + MessageInfos: file_github_com_containerd_ttrpc_request_proto_msgTypes, + }.Build() + File_github_com_containerd_ttrpc_request_proto = out.File + file_github_com_containerd_ttrpc_request_proto_rawDesc = nil + file_github_com_containerd_ttrpc_request_proto_goTypes = nil + file_github_com_containerd_ttrpc_request_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/ttrpc/request.proto b/vendor/github.com/containerd/ttrpc/request.proto new file mode 100644 index 0000000000000..37da334fc2a98 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/request.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; + +package ttrpc; + +import "proto/status.proto"; + +option go_package = "github.com/containerd/ttrpc"; + +message Request { + string service = 1; + string method = 2; + bytes payload = 3; + int64 timeout_nano = 4; + repeated KeyValue metadata = 5; +} + +message Response { + Status status = 1; + bytes payload = 2; +} + +message StringList { + repeated string list = 1; +} + +message KeyValue { + string key = 1; + string value = 2; +} diff --git a/vendor/github.com/containerd/ttrpc/server.go b/vendor/github.com/containerd/ttrpc/server.go index b0e48073e4d9f..e4dd10b06ad80 100644 --- a/vendor/github.com/containerd/ttrpc/server.go +++ b/vendor/github.com/containerd/ttrpc/server.go @@ -24,6 +24,7 @@ import ( "net" "sync" "sync/atomic" + "syscall" "time" "github.com/sirupsen/logrus" @@ -31,10 +32,6 @@ import ( "google.golang.org/grpc/status" ) -var ( - ErrServerClosed = errors.New("ttrpc: server closed") -) - type Server struct { config *serverConfig services *serviceSet @@ -66,8 +63,14 @@ func NewServer(opts ...ServerOpt) (*Server, error) { }, nil } +// Register registers a map of methods to method handlers +// TODO: Remove in 2.0, does not support streams func (s *Server) Register(name string, methods map[string]Method) { - s.services.register(name, methods) + s.services.register(name, &ServiceDesc{Methods: methods}) +} + +func (s *Server) RegisterService(name string, desc *ServiceDesc) { + s.services.register(name, desc) } func (s *Server) Serve(ctx context.Context, l net.Listener) error { @@ -301,27 +304,25 @@ func (c *serverConn) close() error { func (c *serverConn) run(sctx context.Context) { type ( - request struct { - id uint32 - req *Request - } - response struct { - id uint32 - resp *Response + id uint32 + status *status.Status + data []byte + closeStream bool + streaming bool } ) var ( - ch = newChannel(c.conn) - ctx, cancel = context.WithCancel(sctx) - active int - state connState = connStateIdle - responses = make(chan response) - requests = make(chan request) - recvErr = make(chan error, 1) - shutdown = c.shutdown - done = make(chan struct{}) + ch = newChannel(c.conn) + ctx, cancel = context.WithCancel(sctx) + state connState = connStateIdle + responses = make(chan response) + recvErr = make(chan error, 1) + done = make(chan struct{}) + streams = sync.Map{} + active int32 + lastStreamID uint32 ) defer c.conn.Close() @@ -329,27 +330,26 @@ func (c *serverConn) run(sctx context.Context) { defer close(done) defer 
c.server.delConnection(c) - go func(recvErr chan error) { - defer close(recvErr) - sendImmediate := func(id uint32, st *status.Status) bool { - select { - case responses <- response{ - // even though we've had an invalid stream id, we send it - // back on the same stream id so the client knows which - // stream id was bad. - id: id, - resp: &Response{ - Status: st.Proto(), - }, - }: - return true - case <-c.shutdown: - return false - case <-done: - return false - } + sendStatus := func(id uint32, st *status.Status) bool { + select { + case responses <- response{ + // even though we've had an invalid stream id, we send it + // back on the same stream id so the client knows which + // stream id was bad. + id: id, + status: st, + closeStream: true, + }: + return true + case <-c.shutdown: + return false + case <-done: + return false } + } + go func(recvErr chan error) { + defer close(recvErr) for { select { case <-c.shutdown: @@ -369,112 +369,173 @@ func (c *serverConn) run(sctx context.Context) { // in this case, we send an error for that particular message // when the status is defined. - if !sendImmediate(mh.StreamID, status) { + if !sendStatus(mh.StreamID, status) { return } continue } - if mh.Type != messageTypeRequest { - // we must ignore this for future compat. - continue - } - - var req Request - if err := c.server.codec.Unmarshal(p, &req); err != nil { - ch.putmbuf(p) - if !sendImmediate(mh.StreamID, status.Newf(codes.InvalidArgument, "unmarshal request error: %v", err)) { - return - } - continue - } - ch.putmbuf(p) - if mh.StreamID%2 != 1 { // enforce odd client initiated identifiers. - if !sendImmediate(mh.StreamID, status.Newf(codes.InvalidArgument, "StreamID must be odd for client initiated streams")) { + if !sendStatus(mh.StreamID, status.Newf(codes.InvalidArgument, "StreamID must be odd for client initiated streams")) { return } continue } - // Forward the request to the main loop. We don't wait on s.done - // because we have already accepted the client request. - select { - case requests <- request{ - id: mh.StreamID, - req: &req, - }: - case <-done: - return + if mh.Type == messageTypeData { + i, ok := streams.Load(mh.StreamID) + if !ok { + if !sendStatus(mh.StreamID, status.Newf(codes.InvalidArgument, "StreamID is no longer active")) { + return + } + } + sh := i.(*streamHandler) + if mh.Flags&flagNoData != flagNoData { + unmarshal := func(obj interface{}) error { + err := protoUnmarshal(p, obj) + ch.putmbuf(p) + return err + } + + if err := sh.data(unmarshal); err != nil { + if !sendStatus(mh.StreamID, status.Newf(codes.InvalidArgument, "data handling error: %v", err)) { + return + } + } + } + + if mh.Flags&flagRemoteClosed == flagRemoteClosed { + sh.closeSend() + if len(p) > 0 { + if !sendStatus(mh.StreamID, status.Newf(codes.InvalidArgument, "data close message cannot include data")) { + return + } + } + } + } else if mh.Type == messageTypeRequest { + if mh.StreamID <= lastStreamID { + // enforce odd client initiated identifiers. + if !sendStatus(mh.StreamID, status.Newf(codes.InvalidArgument, "StreamID cannot be re-used and must increment")) { + return + } + continue + + } + lastStreamID = mh.StreamID + + // TODO: Make request type configurable + // Unmarshaller which takes in a byte array and returns an interface? 
+ var req Request + if err := c.server.codec.Unmarshal(p, &req); err != nil { + ch.putmbuf(p) + if !sendStatus(mh.StreamID, status.Newf(codes.InvalidArgument, "unmarshal request error: %v", err)) { + return + } + continue + } + ch.putmbuf(p) + + id := mh.StreamID + respond := func(status *status.Status, data []byte, streaming, closeStream bool) error { + select { + case responses <- response{ + id: id, + status: status, + data: data, + closeStream: closeStream, + streaming: streaming, + }: + case <-done: + return ErrClosed + } + return nil + } + sh, err := c.server.services.handle(ctx, &req, respond) + if err != nil { + status, _ := status.FromError(err) + if !sendStatus(mh.StreamID, status) { + return + } + continue + } + + streams.Store(id, sh) + atomic.AddInt32(&active, 1) } + // TODO: else we must ignore this for future compat. log this? } }(recvErr) for { - newstate := state - switch { - case active > 0: + var ( + newstate connState + shutdown chan struct{} + ) + + activeN := atomic.LoadInt32(&active) + if activeN > 0 { newstate = connStateActive shutdown = nil - case active == 0: + } else { newstate = connStateIdle shutdown = c.shutdown // only enable this branch in idle mode } - if newstate != state { c.setState(newstate) state = newstate } select { - case request := <-requests: - active++ - go func(id uint32) { - ctx, cancel := getRequestContext(ctx, request.req) - defer cancel() - - p, status := c.server.services.call(ctx, request.req.Service, request.req.Method, request.req.Payload) - resp := &Response{ - Status: status.Proto(), - Payload: p, + case response := <-responses: + if !response.streaming || response.status.Code() != codes.OK { + p, err := c.server.codec.Marshal(&Response{ + Status: response.status.Proto(), + Payload: response.data, + }) + if err != nil { + logrus.WithError(err).Error("failed marshaling response") + return } - select { - case responses <- response{ - id: id, - resp: resp, - }: - case <-done: + if err := ch.send(response.id, messageTypeResponse, 0, p); err != nil { + logrus.WithError(err).Error("failed sending message on channel") + return + } + } else { + var flags uint8 + if response.closeStream { + flags = flagRemoteClosed + } + if response.data == nil { + flags = flags | flagNoData + } + if err := ch.send(response.id, messageTypeData, flags, response.data); err != nil { + logrus.WithError(err).Error("failed sending message on channel") + return } - }(request.id) - case response := <-responses: - p, err := c.server.codec.Marshal(response.resp) - if err != nil { - logrus.WithError(err).Error("failed marshaling response") - return } - if err := ch.send(response.id, messageTypeResponse, p); err != nil { - logrus.WithError(err).Error("failed sending message on channel") - return + if response.closeStream { + // The ttrpc protocol currently does not support the case where + // the server is localClosed but not remoteClosed. Once the server + // is closing, the whole stream may be considered finished + streams.Delete(response.id) + atomic.AddInt32(&active, -1) } - - active-- case err := <-recvErr: // TODO(stevvooe): Not wildly clear what we should do in this // branch. Basically, it means that we are no longer receiving // requests due to a terminal error. 
recvErr = nil // connection is now "closing" - if err == io.EOF || err == io.ErrUnexpectedEOF { + if err == io.EOF || err == io.ErrUnexpectedEOF || errors.Is(err, syscall.ECONNRESET) { // The client went away and we should stop processing // requests, so that the client connection is closed return } - if err != nil { - logrus.WithError(err).Error("error receiving message") - } + logrus.WithError(err).Error("error receiving message") + // else, initiate shutdown case <-shutdown: return } diff --git a/vendor/github.com/containerd/ttrpc/services.go b/vendor/github.com/containerd/ttrpc/services.go index f359e9611f9e5..6aabfbb4d18d3 100644 --- a/vendor/github.com/containerd/ttrpc/services.go +++ b/vendor/github.com/containerd/ttrpc/services.go @@ -25,43 +25,62 @@ import ( "path" "unsafe" - "github.com/gogo/protobuf/proto" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" ) type Method func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) +type StreamHandler func(context.Context, StreamServer) (interface{}, error) + +type Stream struct { + Handler StreamHandler + StreamingClient bool + StreamingServer bool +} + type ServiceDesc struct { Methods map[string]Method - - // TODO(stevvooe): Add stream support. + Streams map[string]Stream } type serviceSet struct { - services map[string]ServiceDesc - interceptor UnaryServerInterceptor + services map[string]*ServiceDesc + unaryInterceptor UnaryServerInterceptor + streamInterceptor StreamServerInterceptor } func newServiceSet(interceptor UnaryServerInterceptor) *serviceSet { return &serviceSet{ - services: make(map[string]ServiceDesc), - interceptor: interceptor, + services: make(map[string]*ServiceDesc), + unaryInterceptor: interceptor, + streamInterceptor: defaultStreamServerInterceptor, } } -func (s *serviceSet) register(name string, methods map[string]Method) { +func (s *serviceSet) register(name string, desc *ServiceDesc) { if _, ok := s.services[name]; ok { panic(fmt.Errorf("duplicate service %v registered", name)) } - s.services[name] = ServiceDesc{ - Methods: methods, - } + s.services[name] = desc } -func (s *serviceSet) call(ctx context.Context, serviceName, methodName string, p []byte) ([]byte, *status.Status) { - p, err := s.dispatch(ctx, serviceName, methodName, p) +func (s *serviceSet) unaryCall(ctx context.Context, method Method, info *UnaryServerInfo, data []byte) (p []byte, st *status.Status) { + unmarshal := func(obj interface{}) error { + return protoUnmarshal(data, obj) + } + + resp, err := s.unaryInterceptor(ctx, unmarshal, info, method) + if err == nil { + if isNil(resp) { + err = errors.New("ttrpc: marshal called with nil") + } else { + p, err = protoMarshal(resp) + } + } + st, ok := status.FromError(err) if !ok { st = status.New(convertCode(err), err.Error()) @@ -70,38 +89,142 @@ func (s *serviceSet) call(ctx context.Context, serviceName, methodName string, p return p, st } -func (s *serviceSet) dispatch(ctx context.Context, serviceName, methodName string, p []byte) ([]byte, error) { - method, err := s.resolve(serviceName, methodName) - if err != nil { - return nil, err +func (s *serviceSet) streamCall(ctx context.Context, stream StreamHandler, info *StreamServerInfo, ss StreamServer) (p []byte, st *status.Status) { + resp, err := s.streamInterceptor(ctx, ss, info, stream) + if err == nil { + p, err = protoMarshal(resp) + } + st, ok := status.FromError(err) + if !ok { + st = status.New(convertCode(err), err.Error()) } + return +} - unmarshal := func(obj 
interface{}) error { - switch v := obj.(type) { - case proto.Message: - if err := proto.Unmarshal(p, v); err != nil { - return status.Errorf(codes.Internal, "ttrpc: error unmarshalling payload: %v", err.Error()) +func (s *serviceSet) handle(ctx context.Context, req *Request, respond func(*status.Status, []byte, bool, bool) error) (*streamHandler, error) { + srv, ok := s.services[req.Service] + if !ok { + return nil, status.Errorf(codes.Unimplemented, "service %v", req.Service) + } + + if method, ok := srv.Methods[req.Method]; ok { + go func() { + ctx, cancel := getRequestContext(ctx, req) + defer cancel() + + info := &UnaryServerInfo{ + FullMethod: fullPath(req.Service, req.Method), } - default: - return status.Errorf(codes.Internal, "ttrpc: error unsupported request type: %T", v) + p, st := s.unaryCall(ctx, method, info, req.Payload) + + respond(st, p, false, true) + }() + return nil, nil + } + if stream, ok := srv.Streams[req.Method]; ok { + ctx, cancel := getRequestContext(ctx, req) + info := &StreamServerInfo{ + FullMethod: fullPath(req.Service, req.Method), + StreamingClient: stream.StreamingClient, + StreamingServer: stream.StreamingServer, } - return nil + sh := &streamHandler{ + ctx: ctx, + respond: respond, + recv: make(chan Unmarshaler, 5), + info: info, + } + go func() { + defer cancel() + p, st := s.streamCall(ctx, stream.Handler, info, sh) + respond(st, p, stream.StreamingServer, true) + }() + + if req.Payload != nil { + unmarshal := func(obj interface{}) error { + return protoUnmarshal(req.Payload, obj) + } + if err := sh.data(unmarshal); err != nil { + return nil, err + } + } + + return sh, nil } + return nil, status.Errorf(codes.Unimplemented, "method %v", req.Method) +} + +type streamHandler struct { + ctx context.Context + respond func(*status.Status, []byte, bool, bool) error + recv chan Unmarshaler + info *StreamServerInfo + + remoteClosed bool + localClosed bool +} - info := &UnaryServerInfo{ - FullMethod: fullPath(serviceName, methodName), +func (s *streamHandler) closeSend() { + if !s.remoteClosed { + s.remoteClosed = true + close(s.recv) + } +} + +func (s *streamHandler) data(unmarshal Unmarshaler) error { + if s.remoteClosed { + return ErrStreamClosed + } + select { + case s.recv <- unmarshal: + return nil + case <-s.ctx.Done(): + return s.ctx.Err() } +} - resp, err := s.interceptor(ctx, unmarshal, info, method) +func (s *streamHandler) SendMsg(m interface{}) error { + if s.localClosed { + return ErrStreamClosed + } + p, err := protoMarshal(m) if err != nil { - return nil, err + return err } + return s.respond(nil, p, true, false) +} + +func (s *streamHandler) RecvMsg(m interface{}) error { + select { + case unmarshal, ok := <-s.recv: + if !ok { + return io.EOF + } + return unmarshal(m) + case <-s.ctx.Done(): + return s.ctx.Err() - if isNil(resp) { - return nil, errors.New("ttrpc: marshal called with nil") } +} - switch v := resp.(type) { +func protoUnmarshal(p []byte, obj interface{}) error { + switch v := obj.(type) { + case proto.Message: + if err := proto.Unmarshal(p, v); err != nil { + return status.Errorf(codes.Internal, "ttrpc: error unmarshalling payload: %v", err.Error()) + } + default: + return status.Errorf(codes.Internal, "ttrpc: error unsupported request type: %T", v) + } + return nil +} + +func protoMarshal(obj interface{}) ([]byte, error) { + if obj == nil { + return nil, nil + } + + switch v := obj.(type) { case proto.Message: r, err := proto.Marshal(v) if err != nil { @@ -114,20 +237,6 @@ func (s *serviceSet) dispatch(ctx context.Context, 
serviceName, methodName strin } } -func (s *serviceSet) resolve(service, method string) (Method, error) { - srv, ok := s.services[service] - if !ok { - return nil, status.Errorf(codes.Unimplemented, "service %v", service) - } - - mthd, ok := srv.Methods[method] - if !ok { - return nil, status.Errorf(codes.Unimplemented, "method %v", method) - } - - return mthd, nil -} - // convertCode maps stdlib go errors into grpc space. // // This is ripped from the grpc-go code base. diff --git a/vendor/github.com/containerd/ttrpc/stream.go b/vendor/github.com/containerd/ttrpc/stream.go new file mode 100644 index 0000000000000..5f264fe67104b --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/stream.go @@ -0,0 +1,81 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package ttrpc + +import ( + "context" + "sync" +) + +type streamID uint32 + +type streamMessage struct { + header messageHeader + payload []byte +} + +type stream struct { + id streamID + sender sender + recv chan *streamMessage + + closeOnce sync.Once + recvErr error +} + +func newStream(id streamID, send sender) *stream { + return &stream{ + id: id, + sender: send, + recv: make(chan *streamMessage, 1), + } +} + +func (s *stream) closeWithError(err error) error { + s.closeOnce.Do(func() { + if s.recv != nil { + close(s.recv) + if err != nil { + s.recvErr = err + } else { + s.recvErr = ErrClosed + } + + } + }) + return nil +} + +func (s *stream) send(mt messageType, flags uint8, b []byte) error { + return s.sender.send(uint32(s.id), mt, flags, b) +} + +func (s *stream) receive(ctx context.Context, msg *streamMessage) error { + if s.recvErr != nil { + return s.recvErr + } + select { + case s.recv <- msg: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +type sender interface { + send(uint32, messageType, uint8, []byte) error +} diff --git a/vendor/github.com/containerd/ttrpc/stream_server.go b/vendor/github.com/containerd/ttrpc/stream_server.go new file mode 100644 index 0000000000000..b6d1ba720a437 --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/stream_server.go @@ -0,0 +1,22 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package ttrpc + +type StreamServer interface { + SendMsg(m interface{}) error + RecvMsg(m interface{}) error +} diff --git a/vendor/github.com/containerd/ttrpc/test.proto b/vendor/github.com/containerd/ttrpc/test.proto new file mode 100644 index 0000000000000..0e114d556886f --- /dev/null +++ b/vendor/github.com/containerd/ttrpc/test.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; + +package ttrpc; + +option go_package = "github.com/containerd/ttrpc/internal"; + +message TestPayload { + string foo = 1; + int64 deadline = 2; + string metadata = 3; +} + +message EchoPayload { + int64 seq = 1; + string msg = 2; +} diff --git a/vendor/github.com/containerd/ttrpc/types.go b/vendor/github.com/containerd/ttrpc/types.go deleted file mode 100644 index 9a1c19a7238de..0000000000000 --- a/vendor/github.com/containerd/ttrpc/types.go +++ /dev/null @@ -1,63 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package ttrpc - -import ( - "fmt" - - spb "google.golang.org/genproto/googleapis/rpc/status" -) - -type Request struct { - Service string `protobuf:"bytes,1,opt,name=service,proto3"` - Method string `protobuf:"bytes,2,opt,name=method,proto3"` - Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3"` - TimeoutNano int64 `protobuf:"varint,4,opt,name=timeout_nano,proto3"` - Metadata []*KeyValue `protobuf:"bytes,5,rep,name=metadata,proto3"` -} - -func (r *Request) Reset() { *r = Request{} } -func (r *Request) String() string { return fmt.Sprintf("%+#v", r) } -func (r *Request) ProtoMessage() {} - -type Response struct { - Status *spb.Status `protobuf:"bytes,1,opt,name=status,proto3"` - Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3"` -} - -func (r *Response) Reset() { *r = Response{} } -func (r *Response) String() string { return fmt.Sprintf("%+#v", r) } -func (r *Response) ProtoMessage() {} - -type StringList struct { - List []string `protobuf:"bytes,1,rep,name=list,proto3"` -} - -func (r *StringList) Reset() { *r = StringList{} } -func (r *StringList) String() string { return fmt.Sprintf("%+#v", r) } -func (r *StringList) ProtoMessage() {} - -func makeStringList(item ...string) StringList { return StringList{List: item} } - -type KeyValue struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3"` - Value string `protobuf:"bytes,2,opt,name=value,proto3"` -} - -func (m *KeyValue) Reset() { *m = KeyValue{} } -func (*KeyValue) ProtoMessage() {} -func (m *KeyValue) String() string { return fmt.Sprintf("%+#v", m) } diff --git a/vendor/github.com/containerd/ttrpc/unixcreds_linux.go b/vendor/github.com/containerd/ttrpc/unixcreds_linux.go index a59dad60cd55e..c82c9f9d4c749 100644 --- a/vendor/github.com/containerd/ttrpc/unixcreds_linux.go +++ b/vendor/github.com/containerd/ttrpc/unixcreds_linux.go @@ -29,7 +29,7 @@ import ( type UnixCredentialsFunc func(*unix.Ucred) error -func (fn UnixCredentialsFunc) Handshake(ctx context.Context, conn net.Conn) (net.Conn, interface{}, error) { +func (fn UnixCredentialsFunc) Handshake(_ context.Context, conn net.Conn) 
(net.Conn, interface{}, error) { uc, err := requireUnixSocket(conn) if err != nil { return nil, nil, fmt.Errorf("ttrpc.UnixCredentialsFunc: require unix socket: %w", err) @@ -50,7 +50,7 @@ func (fn UnixCredentialsFunc) Handshake(ctx context.Context, conn net.Conn) (net } if ucredErr != nil { - return nil, nil, fmt.Errorf("ttrpc.UnixCredentialsFunc: failed to retrieve socket peer credentials: %w", err) + return nil, nil, fmt.Errorf("ttrpc.UnixCredentialsFunc: failed to retrieve socket peer credentials: %w", ucredErr) } if err := fn(ucred); err != nil { @@ -88,10 +88,6 @@ func UnixSocketRequireSameUser() UnixCredentialsFunc { return UnixSocketRequireUidGid(euid, egid) } -func requireRoot(ucred *unix.Ucred) error { - return requireUidGid(ucred, 0, 0) -} - func requireUidGid(ucred *unix.Ucred, uid, gid int) error { if (uid != -1 && uint32(uid) != ucred.Uid) || (gid != -1 && uint32(gid) != ucred.Gid) { return fmt.Errorf("ttrpc: invalid credentials: %v", syscall.EPERM) diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go index 55ed392a016f9..3ad07aa8f2d6b 100644 --- a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go @@ -50,6 +50,12 @@ func fixupResultVersion(netconf, result []byte) (string, []byte, error) { return "", nil, fmt.Errorf("failed to unmarshal raw result: %w", err) } + // plugin output of "null" is successfully unmarshalled, but results in a nil + // map which causes a panic when the confVersion is assigned below. + if rawResult == nil { + rawResult = make(map[string]interface{}) + } + // Manually decode Result version; we need to know whether its cniVersion // is empty, while built-in decoders (correctly) substitute 0.1.0 for an // empty version per the CNI spec. diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go index 439ad28746651..c5b23a81968f6 100644 --- a/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go +++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go @@ -69,6 +69,58 @@ func Enabled() bool { return true } +// StderrIsJournalStream returns whether the process stderr is connected +// to the Journal's stream transport. +// +// This can be used for automatic protocol upgrading described in [Journal Native Protocol]. +// +// Returns true if JOURNAL_STREAM environment variable is present, +// and stderr's device and inode numbers match it. +// +// Error is returned if unexpected error occurs: e.g. if JOURNAL_STREAM environment variable +// is present, but malformed, fstat syscall fails, etc. +// +// [Journal Native Protocol]: https://systemd.io/JOURNAL_NATIVE_PROTOCOL/#automatic-protocol-upgrading +func StderrIsJournalStream() (bool, error) { + return fdIsJournalStream(syscall.Stderr) +} + +// StdoutIsJournalStream returns whether the process stdout is connected +// to the Journal's stream transport. +// +// Returns true if JOURNAL_STREAM environment variable is present, +// and stdout's device and inode numbers match it. +// +// Error is returned if unexpected error occurs: e.g. if JOURNAL_STREAM environment variable +// is present, but malformed, fstat syscall fails, etc. +// +// Most users should probably use [StderrIsJournalStream]. 
+func StdoutIsJournalStream() (bool, error) { + return fdIsJournalStream(syscall.Stdout) +} + +func fdIsJournalStream(fd int) (bool, error) { + journalStream := os.Getenv("JOURNAL_STREAM") + if journalStream == "" { + return false, nil + } + + var expectedStat syscall.Stat_t + _, err := fmt.Sscanf(journalStream, "%d:%d", &expectedStat.Dev, &expectedStat.Ino) + if err != nil { + return false, fmt.Errorf("failed to parse JOURNAL_STREAM=%q: %v", journalStream, err) + } + + var stat syscall.Stat_t + err = syscall.Fstat(fd, &stat) + if err != nil { + return false, err + } + + match := stat.Dev == expectedStat.Dev && stat.Ino == expectedStat.Ino + return match, nil +} + // Send a message to the local systemd journal. vars is a map of journald // fields to values. Fields must be composed of uppercase letters, numbers, // and underscores, but must not start with an underscore. Within these diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go index 677aca68ed20a..322e41e74c3e6 100644 --- a/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go +++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go @@ -33,3 +33,11 @@ func Enabled() bool { func Send(message string, priority Priority, vars map[string]string) error { return errors.New("could not initialize socket to journald") } + +func StderrIsJournalStream() (bool, error) { + return false, nil +} + +func StdoutIsJournalStream() (bool, error) { + return false, nil +} diff --git a/vendor/github.com/godbus/dbus/v5/auth.go b/vendor/github.com/godbus/dbus/v5/auth.go index a59b4c0eb7659..0f3b252c070f7 100644 --- a/vendor/github.com/godbus/dbus/v5/auth.go +++ b/vendor/github.com/godbus/dbus/v5/auth.go @@ -176,9 +176,10 @@ func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, boo return err, false } state = waitingForReject + } else { + conn.uuid = string(s[1]) + return nil, true } - conn.uuid = string(s[1]) - return nil, true case state == waitingForData: err = authWriteLine(conn.transport, []byte("ERROR")) if err != nil { @@ -191,9 +192,10 @@ func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, boo return err, false } state = waitingForReject + } else { + conn.uuid = string(s[1]) + return nil, true } - conn.uuid = string(s[1]) - return nil, true case state == waitingForOk && string(s[0]) == "DATA": err = authWriteLine(conn.transport, []byte("DATA")) if err != nil { diff --git a/vendor/github.com/godbus/dbus/v5/conn.go b/vendor/github.com/godbus/dbus/v5/conn.go index 76fc5cde3d26d..69978ea26ab63 100644 --- a/vendor/github.com/godbus/dbus/v5/conn.go +++ b/vendor/github.com/godbus/dbus/v5/conn.go @@ -169,7 +169,7 @@ func Connect(address string, opts ...ConnOption) (*Conn, error) { // SystemBusPrivate returns a new private connection to the system bus. // Note: this connection is not ready to use. One must perform Auth and Hello -// on the connection before it is useable. +// on the connection before it is usable. func SystemBusPrivate(opts ...ConnOption) (*Conn, error) { return Dial(getSystemBusPlatformAddress(), opts...) 
} @@ -284,10 +284,6 @@ func newConn(tr transport, opts ...ConnOption) (*Conn, error) { conn.ctx = context.Background() } conn.ctx, conn.cancelCtx = context.WithCancel(conn.ctx) - go func() { - <-conn.ctx.Done() - conn.Close() - }() conn.calls = newCallTracker() if conn.handler == nil { @@ -302,6 +298,11 @@ func newConn(tr transport, opts ...ConnOption) (*Conn, error) { conn.outHandler = &outputHandler{conn: conn} conn.names = newNameTracker() conn.busObj = conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus") + + go func() { + <-conn.ctx.Done() + conn.Close() + }() return conn, nil } @@ -550,6 +551,11 @@ func (conn *Conn) send(ctx context.Context, msg *Message, ch chan *Call) *Call { call.ctx = ctx call.ctxCanceler = canceler conn.calls.track(msg.serial, call) + if ctx.Err() != nil { + // short path: don't even send the message if context already cancelled + conn.calls.handleSendError(msg, ctx.Err()) + return call + } go func() { <-ctx.Done() conn.calls.handleSendError(msg, ctx.Err()) @@ -649,7 +655,9 @@ func (conn *Conn) RemoveMatchSignalContext(ctx context.Context, options ...Match // Signal registers the given channel to be passed all received signal messages. // -// Multiple of these channels can be registered at the same time. +// Multiple of these channels can be registered at the same time. The channel is +// closed if the Conn is closed; it should not be closed by the caller before +// RemoveSignal was called on it. // // These channels are "overwritten" by Eavesdrop; i.e., if there currently is a // channel for eavesdropped messages, this channel receives all signals, and @@ -765,7 +773,12 @@ func getKey(s, key string) string { for _, keyEqualsValue := range strings.Split(s, ",") { keyValue := strings.SplitN(keyEqualsValue, "=", 2) if len(keyValue) == 2 && keyValue[0] == key { - return keyValue[1] + val, err := UnescapeBusAddressValue(keyValue[1]) + if err != nil { + // No way to return an error. 
+ return "" + } + return val } } return "" diff --git a/vendor/github.com/godbus/dbus/v5/conn_other.go b/vendor/github.com/godbus/dbus/v5/conn_other.go index 616dcf66449f7..90289ca85a28a 100644 --- a/vendor/github.com/godbus/dbus/v5/conn_other.go +++ b/vendor/github.com/godbus/dbus/v5/conn_other.go @@ -54,7 +54,7 @@ func tryDiscoverDbusSessionBusAddress() string { if runUserBusFile := path.Join(runtimeDirectory, "bus"); fileExists(runUserBusFile) { // if /run/user//bus exists, that file itself // *is* the unix socket, so return its path - return fmt.Sprintf("unix:path=%s", runUserBusFile) + return fmt.Sprintf("unix:path=%s", EscapeBusAddressValue(runUserBusFile)) } if runUserSessionDbusFile := path.Join(runtimeDirectory, "dbus-session"); fileExists(runUserSessionDbusFile) { // if /run/user//dbus-session exists, it's a @@ -85,9 +85,6 @@ func getRuntimeDirectory() (string, error) { } func fileExists(filename string) bool { - if _, err := os.Stat(filename); !os.IsNotExist(err) { - return true - } else { - return false - } + _, err := os.Stat(filename) + return !os.IsNotExist(err) } diff --git a/vendor/github.com/godbus/dbus/v5/dbus.go b/vendor/github.com/godbus/dbus/v5/dbus.go index ddf3b7afde9d3..c188d104854fa 100644 --- a/vendor/github.com/godbus/dbus/v5/dbus.go +++ b/vendor/github.com/godbus/dbus/v5/dbus.go @@ -122,8 +122,11 @@ func isConvertibleTo(dest, src reflect.Type) bool { case dest.Kind() == reflect.Slice: return src.Kind() == reflect.Slice && isConvertibleTo(dest.Elem(), src.Elem()) + case dest.Kind() == reflect.Ptr: + dest = dest.Elem() + return isConvertibleTo(dest, src) case dest.Kind() == reflect.Struct: - return src == interfacesType + return src == interfacesType || dest.Kind() == src.Kind() default: return src.ConvertibleTo(dest) } @@ -274,13 +277,8 @@ func storeSliceIntoInterface(dest, src reflect.Value) error { func storeSliceIntoSlice(dest, src reflect.Value) error { if dest.IsNil() || dest.Len() < src.Len() { dest.Set(reflect.MakeSlice(dest.Type(), src.Len(), src.Cap())) - } - if dest.Len() != src.Len() { - return fmt.Errorf( - "dbus.Store: type mismatch: "+ - "slices are different lengths "+ - "need: %d have: %d", - src.Len(), dest.Len()) + } else if dest.Len() > src.Len() { + dest.Set(dest.Slice(0, src.Len())) } for i := 0; i < src.Len(); i++ { err := store(dest.Index(i), getVariantValue(src.Index(i))) diff --git a/vendor/github.com/godbus/dbus/v5/doc.go b/vendor/github.com/godbus/dbus/v5/doc.go index ade1df951cd22..8f25a00d61e26 100644 --- a/vendor/github.com/godbus/dbus/v5/doc.go +++ b/vendor/github.com/godbus/dbus/v5/doc.go @@ -10,8 +10,10 @@ value. Conversion Rules For outgoing messages, Go types are automatically converted to the -corresponding D-Bus types. The following types are directly encoded as their -respective D-Bus equivalents: +corresponding D-Bus types. See the official specification at +https://dbus.freedesktop.org/doc/dbus-specification.html#type-system for more +information on the D-Bus type system. The following types are directly encoded +as their respective D-Bus equivalents: Go type | D-Bus type ------------+----------- @@ -39,8 +41,8 @@ Maps encode as DICTs, provided that their key type can be used as a key for a DICT. Structs other than Variant and Signature encode as a STRUCT containing their -exported fields. Fields whose tags contain `dbus:"-"` and unexported fields will -be skipped. +exported fields in order. Fields whose tags contain `dbus:"-"` and unexported +fields will be skipped. Pointers encode as the value they're pointed to. 
diff --git a/vendor/github.com/godbus/dbus/v5/escape.go b/vendor/github.com/godbus/dbus/v5/escape.go new file mode 100644 index 0000000000000..d1509d945818e --- /dev/null +++ b/vendor/github.com/godbus/dbus/v5/escape.go @@ -0,0 +1,84 @@ +package dbus + +import "net/url" + +// EscapeBusAddressValue implements a requirement to escape the values +// in D-Bus server addresses, as defined by the D-Bus specification at +// https://dbus.freedesktop.org/doc/dbus-specification.html#addresses. +func EscapeBusAddressValue(val string) string { + toEsc := strNeedsEscape(val) + if toEsc == 0 { + // Avoid unneeded allocation/copying. + return val + } + + // Avoid allocation for short paths. + var buf [64]byte + var out []byte + // Every to-be-escaped byte needs 2 extra bytes. + required := len(val) + 2*toEsc + if required <= len(buf) { + out = buf[:required] + } else { + out = make([]byte, required) + } + + j := 0 + for i := 0; i < len(val); i++ { + if ch := val[i]; needsEscape(ch) { + // Convert ch to %xx, where xx is hex value. + out[j] = '%' + out[j+1] = hexchar(ch >> 4) + out[j+2] = hexchar(ch & 0x0F) + j += 3 + } else { + out[j] = ch + j++ + } + } + + return string(out) +} + +// UnescapeBusAddressValue unescapes values in D-Bus server addresses, +// as defined by the D-Bus specification at +// https://dbus.freedesktop.org/doc/dbus-specification.html#addresses. +func UnescapeBusAddressValue(val string) (string, error) { + // Looks like url.PathUnescape does exactly what is required. + return url.PathUnescape(val) +} + +// hexchar returns an octal representation of a n, where n < 16. +// For invalid values of n, the function panics. +func hexchar(n byte) byte { + const hex = "0123456789abcdef" + + // For n >= len(hex), runtime will panic. + return hex[n] +} + +// needsEscape tells if a byte is NOT one of optionally-escaped bytes. +func needsEscape(c byte) bool { + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + return false + } + switch c { + case '-', '_', '/', '\\', '.', '*': + return false + } + + return true +} + +// strNeedsEscape tells how many bytes in the string need escaping. +func strNeedsEscape(val string) int { + count := 0 + + for i := 0; i < len(val); i++ { + if needsEscape(val[i]) { + count++ + } + } + + return count +} diff --git a/vendor/github.com/godbus/dbus/v5/export.go b/vendor/github.com/godbus/dbus/v5/export.go index 522334715b603..d3dd9f7cd62d5 100644 --- a/vendor/github.com/godbus/dbus/v5/export.go +++ b/vendor/github.com/godbus/dbus/v5/export.go @@ -3,6 +3,7 @@ package dbus import ( "errors" "fmt" + "os" "reflect" "strings" ) @@ -209,28 +210,23 @@ func (conn *Conn) handleCall(msg *Message) { } reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...)) - conn.sendMessageAndIfClosed(reply, nil) + if err := reply.IsValid(); err != nil { + fmt.Fprintf(os.Stderr, "dbus: dropping invalid reply to %s.%s on obj %s: %s\n", ifaceName, name, path, err) + } else { + conn.sendMessageAndIfClosed(reply, nil) + } } } // Emit emits the given signal on the message bus. The name parameter must be // formatted as "interface.member", e.g., "org.freedesktop.DBus.NameLost". 
func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) error { - if !path.IsValid() { - return errors.New("dbus: invalid object path") - } i := strings.LastIndex(name, ".") if i == -1 { return errors.New("dbus: invalid method name") } iface := name[:i] member := name[i+1:] - if !isValidMember(member) { - return errors.New("dbus: invalid method name") - } - if !isValidInterface(iface) { - return errors.New("dbus: invalid interface name") - } msg := new(Message) msg.Type = TypeSignal msg.Headers = make(map[HeaderField]Variant) @@ -241,6 +237,9 @@ func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) erro if len(values) > 0 { msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...)) } + if err := msg.IsValid(); err != nil { + return err + } var closed bool conn.sendMessageAndIfClosed(msg, func() { diff --git a/vendor/github.com/godbus/dbus/v5/homedir.go b/vendor/github.com/godbus/dbus/v5/homedir.go index 0b745f9313a4a..c44d9b5fc2416 100644 --- a/vendor/github.com/godbus/dbus/v5/homedir.go +++ b/vendor/github.com/godbus/dbus/v5/homedir.go @@ -2,27 +2,24 @@ package dbus import ( "os" - "sync" -) - -var ( - homeDir string - homeDirLock sync.Mutex + "os/user" ) +// Get returns the home directory of the current user, which is usually the +// value of HOME environment variable. In case it is not set or empty, os/user +// package is used. +// +// If linking statically with cgo enabled against glibc, make sure the +// osusergo build tag is used. +// +// If needing to do nss lookups, do not disable cgo or set osusergo. func getHomeDir() string { - homeDirLock.Lock() - defer homeDirLock.Unlock() - + homeDir := os.Getenv("HOME") if homeDir != "" { return homeDir } - - homeDir = os.Getenv("HOME") - if homeDir != "" { - return homeDir + if u, err := user.Current(); err == nil { + return u.HomeDir } - - homeDir = lookupHomeDir() - return homeDir + return "/" } diff --git a/vendor/github.com/godbus/dbus/v5/homedir_dynamic.go b/vendor/github.com/godbus/dbus/v5/homedir_dynamic.go deleted file mode 100644 index 2732081e73b47..0000000000000 --- a/vendor/github.com/godbus/dbus/v5/homedir_dynamic.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !static_build - -package dbus - -import ( - "os/user" -) - -func lookupHomeDir() string { - u, err := user.Current() - if err != nil { - return "/" - } - return u.HomeDir -} diff --git a/vendor/github.com/godbus/dbus/v5/homedir_static.go b/vendor/github.com/godbus/dbus/v5/homedir_static.go deleted file mode 100644 index b9d9cb5525a39..0000000000000 --- a/vendor/github.com/godbus/dbus/v5/homedir_static.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build static_build - -package dbus - -import ( - "bufio" - "os" - "strconv" - "strings" -) - -func lookupHomeDir() string { - myUid := os.Getuid() - - f, err := os.Open("/etc/passwd") - if err != nil { - return "/" - } - defer f.Close() - - s := bufio.NewScanner(f) - - for s.Scan() { - if err := s.Err(); err != nil { - break - } - - line := strings.TrimSpace(s.Text()) - if line == "" { - continue - } - - parts := strings.Split(line, ":") - - if len(parts) >= 6 { - uid, err := strconv.Atoi(parts[2]) - if err == nil && uid == myUid { - return parts[5] - } - } - } - - // Default to / if we can't get a better value - return "/" -} diff --git a/vendor/github.com/godbus/dbus/v5/message.go b/vendor/github.com/godbus/dbus/v5/message.go index 16693eb3017ea..bdf43fdd6e59c 100644 --- a/vendor/github.com/godbus/dbus/v5/message.go +++ b/vendor/github.com/godbus/dbus/v5/message.go @@ -208,7 +208,7 @@ func 
DecodeMessageWithFDs(rd io.Reader, fds []int) (msg *Message, err error) { // The possibly returned error can be an error of the underlying reader, an // InvalidMessageError or a FormatError. func DecodeMessage(rd io.Reader) (msg *Message, err error) { - return DecodeMessageWithFDs(rd, make([]int, 0)); + return DecodeMessageWithFDs(rd, make([]int, 0)) } type nullwriter struct{} @@ -227,8 +227,8 @@ func (msg *Message) CountFds() (int, error) { } func (msg *Message) EncodeToWithFDs(out io.Writer, order binary.ByteOrder) (fds []int, err error) { - if err := msg.IsValid(); err != nil { - return make([]int, 0), err + if err := msg.validateHeader(); err != nil { + return nil, err } var vs [7]interface{} switch order { @@ -237,7 +237,7 @@ func (msg *Message) EncodeToWithFDs(out io.Writer, order binary.ByteOrder) (fds case binary.BigEndian: vs[0] = byte('B') default: - return make([]int, 0), errors.New("dbus: invalid byte order") + return nil, errors.New("dbus: invalid byte order") } body := new(bytes.Buffer) fds = make([]int, 0) @@ -284,8 +284,13 @@ func (msg *Message) EncodeTo(out io.Writer, order binary.ByteOrder) (err error) } // IsValid checks whether msg is a valid message and returns an -// InvalidMessageError if it is not. +// InvalidMessageError or FormatError if it is not. func (msg *Message) IsValid() error { + var b bytes.Buffer + return msg.EncodeTo(&b, nativeEndian) +} + +func (msg *Message) validateHeader() error { if msg.Flags & ^(FlagNoAutoStart|FlagNoReplyExpected|FlagAllowInteractiveAuthorization) != 0 { return InvalidMessageError("invalid flags") } @@ -330,6 +335,7 @@ func (msg *Message) IsValid() error { return InvalidMessageError("missing signature") } } + return nil } diff --git a/vendor/github.com/godbus/dbus/v5/server_interfaces.go b/vendor/github.com/godbus/dbus/v5/server_interfaces.go index 79d97edf3ece9..e4e0389fdf091 100644 --- a/vendor/github.com/godbus/dbus/v5/server_interfaces.go +++ b/vendor/github.com/godbus/dbus/v5/server_interfaces.go @@ -63,7 +63,7 @@ type Method interface { // any other decoding scheme. type ArgumentDecoder interface { // To decode the arguments of a method the sender and message are - // provided incase the semantics of the implementer provides access + // provided in case the semantics of the implementer provides access // to these as part of the method invocation. 
DecodeArguments(conn *Conn, sender string, msg *Message, args []interface{}) ([]interface{}, error) } diff --git a/vendor/github.com/godbus/dbus/v5/sig.go b/vendor/github.com/godbus/dbus/v5/sig.go index 41a0398129d25..6b9cadb5fbc37 100644 --- a/vendor/github.com/godbus/dbus/v5/sig.go +++ b/vendor/github.com/godbus/dbus/v5/sig.go @@ -102,7 +102,7 @@ func getSignature(t reflect.Type, depth *depthCounter) (sig string) { } } if len(s) == 0 { - panic("empty struct") + panic(InvalidTypeError{t}) } return "(" + s + ")" case reflect.Array, reflect.Slice: diff --git a/vendor/github.com/godbus/dbus/v5/transport_unix.go b/vendor/github.com/godbus/dbus/v5/transport_unix.go index 2212e7fa7f2d8..0a8c712ebdfa5 100644 --- a/vendor/github.com/godbus/dbus/v5/transport_unix.go +++ b/vendor/github.com/godbus/dbus/v5/transport_unix.go @@ -154,17 +154,15 @@ func (t *unixTransport) ReadMessage() (*Message, error) { // substitute the values in the message body (which are indices for the // array receiver via OOB) with the actual values for i, v := range msg.Body { - switch v.(type) { + switch index := v.(type) { case UnixFDIndex: - j := v.(UnixFDIndex) - if uint32(j) >= unixfds { + if uint32(index) >= unixfds { return nil, InvalidMessageError("invalid index for unix fd") } - msg.Body[i] = UnixFD(fds[j]) + msg.Body[i] = UnixFD(fds[index]) case []UnixFDIndex: - idxArray := v.([]UnixFDIndex) - fdArray := make([]UnixFD, len(idxArray)) - for k, j := range idxArray { + fdArray := make([]UnixFD, len(index)) + for k, j := range index { if uint32(j) >= unixfds { return nil, InvalidMessageError("invalid index for unix fd") } diff --git a/vendor/github.com/godbus/dbus/v5/transport_zos.go b/vendor/github.com/godbus/dbus/v5/transport_zos.go new file mode 100644 index 0000000000000..1bba0d6bf781e --- /dev/null +++ b/vendor/github.com/godbus/dbus/v5/transport_zos.go @@ -0,0 +1,6 @@ +package dbus + +func (t *unixTransport) SendNullByte() error { + _, err := t.Write([]byte{0}) + return err +} diff --git a/vendor/github.com/godbus/dbus/v5/variant.go b/vendor/github.com/godbus/dbus/v5/variant.go index f1e81f3ede671..ca3dbe16a44df 100644 --- a/vendor/github.com/godbus/dbus/v5/variant.go +++ b/vendor/github.com/godbus/dbus/v5/variant.go @@ -49,7 +49,7 @@ func ParseVariant(s string, sig Signature) (Variant, error) { } // format returns a formatted version of v and whether this string can be parsed -// unambigously. +// unambiguously. func (v Variant) format() (string, bool) { switch v.sig.str[0] { case 'b', 'i': diff --git a/vendor/github.com/golang/protobuf/descriptor/descriptor.go b/vendor/github.com/golang/protobuf/descriptor/descriptor.go deleted file mode 100644 index ffde8a65081e4..0000000000000 --- a/vendor/github.com/golang/protobuf/descriptor/descriptor.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package descriptor provides functions for obtaining the protocol buffer -// descriptors of generated Go types. -// -// Deprecated: See the "google.golang.org/protobuf/reflect/protoreflect" package -// for how to obtain an EnumDescriptor or MessageDescriptor in order to -// programatically interact with the protobuf type system. 
-package descriptor - -import ( - "bytes" - "compress/gzip" - "io/ioutil" - "sync" - - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/reflect/protodesc" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/runtime/protoimpl" - - descriptorpb "github.com/golang/protobuf/protoc-gen-go/descriptor" -) - -// Message is proto.Message with a method to return its descriptor. -// -// Deprecated: The Descriptor method may not be generated by future -// versions of protoc-gen-go, meaning that this interface may not -// be implemented by many concrete message types. -type Message interface { - proto.Message - Descriptor() ([]byte, []int) -} - -// ForMessage returns the file descriptor proto containing -// the message and the message descriptor proto for the message itself. -// The returned proto messages must not be mutated. -// -// Deprecated: Not all concrete message types satisfy the Message interface. -// Use MessageDescriptorProto instead. If possible, the calling code should -// be rewritten to use protobuf reflection instead. -// See package "google.golang.org/protobuf/reflect/protoreflect" for details. -func ForMessage(m Message) (*descriptorpb.FileDescriptorProto, *descriptorpb.DescriptorProto) { - return MessageDescriptorProto(m) -} - -type rawDesc struct { - fileDesc []byte - indexes []int -} - -var rawDescCache sync.Map // map[protoreflect.Descriptor]*rawDesc - -func deriveRawDescriptor(d protoreflect.Descriptor) ([]byte, []int) { - // Fast-path: check whether raw descriptors are already cached. - origDesc := d - if v, ok := rawDescCache.Load(origDesc); ok { - return v.(*rawDesc).fileDesc, v.(*rawDesc).indexes - } - - // Slow-path: derive the raw descriptor from the v2 descriptor. - - // Start with the leaf (a given enum or message declaration) and - // ascend upwards until we hit the parent file descriptor. - var idxs []int - for { - idxs = append(idxs, d.Index()) - d = d.Parent() - if d == nil { - // TODO: We could construct a FileDescriptor stub for standalone - // descriptors to satisfy the API. - return nil, nil - } - if _, ok := d.(protoreflect.FileDescriptor); ok { - break - } - } - - // Obtain the raw file descriptor. - fd := d.(protoreflect.FileDescriptor) - b, _ := proto.Marshal(protodesc.ToFileDescriptorProto(fd)) - file := protoimpl.X.CompressGZIP(b) - - // Reverse the indexes, since we populated it in reverse. - for i, j := 0, len(idxs)-1; i < j; i, j = i+1, j-1 { - idxs[i], idxs[j] = idxs[j], idxs[i] - } - - if v, ok := rawDescCache.LoadOrStore(origDesc, &rawDesc{file, idxs}); ok { - return v.(*rawDesc).fileDesc, v.(*rawDesc).indexes - } - return file, idxs -} - -// EnumRawDescriptor returns the GZIP'd raw file descriptor representing -// the enum and the index path to reach the enum declaration. -// The returned slices must not be mutated. -func EnumRawDescriptor(e proto.GeneratedEnum) ([]byte, []int) { - if ev, ok := e.(interface{ EnumDescriptor() ([]byte, []int) }); ok { - return ev.EnumDescriptor() - } - ed := protoimpl.X.EnumTypeOf(e) - return deriveRawDescriptor(ed.Descriptor()) -} - -// MessageRawDescriptor returns the GZIP'd raw file descriptor representing -// the message and the index path to reach the message declaration. -// The returned slices must not be mutated. 
-func MessageRawDescriptor(m proto.GeneratedMessage) ([]byte, []int) { - if mv, ok := m.(interface{ Descriptor() ([]byte, []int) }); ok { - return mv.Descriptor() - } - md := protoimpl.X.MessageTypeOf(m) - return deriveRawDescriptor(md.Descriptor()) -} - -var fileDescCache sync.Map // map[*byte]*descriptorpb.FileDescriptorProto - -func deriveFileDescriptor(rawDesc []byte) *descriptorpb.FileDescriptorProto { - // Fast-path: check whether descriptor protos are already cached. - if v, ok := fileDescCache.Load(&rawDesc[0]); ok { - return v.(*descriptorpb.FileDescriptorProto) - } - - // Slow-path: derive the descriptor proto from the GZIP'd message. - zr, err := gzip.NewReader(bytes.NewReader(rawDesc)) - if err != nil { - panic(err) - } - b, err := ioutil.ReadAll(zr) - if err != nil { - panic(err) - } - fd := new(descriptorpb.FileDescriptorProto) - if err := proto.Unmarshal(b, fd); err != nil { - panic(err) - } - if v, ok := fileDescCache.LoadOrStore(&rawDesc[0], fd); ok { - return v.(*descriptorpb.FileDescriptorProto) - } - return fd -} - -// EnumDescriptorProto returns the file descriptor proto representing -// the enum and the enum descriptor proto for the enum itself. -// The returned proto messages must not be mutated. -func EnumDescriptorProto(e proto.GeneratedEnum) (*descriptorpb.FileDescriptorProto, *descriptorpb.EnumDescriptorProto) { - rawDesc, idxs := EnumRawDescriptor(e) - if rawDesc == nil || idxs == nil { - return nil, nil - } - fd := deriveFileDescriptor(rawDesc) - if len(idxs) == 1 { - return fd, fd.EnumType[idxs[0]] - } - md := fd.MessageType[idxs[0]] - for _, i := range idxs[1 : len(idxs)-1] { - md = md.NestedType[i] - } - ed := md.EnumType[idxs[len(idxs)-1]] - return fd, ed -} - -// MessageDescriptorProto returns the file descriptor proto representing -// the message and the message descriptor proto for the message itself. -// The returned proto messages must not be mutated. -func MessageDescriptorProto(m proto.GeneratedMessage) (*descriptorpb.FileDescriptorProto, *descriptorpb.DescriptorProto) { - rawDesc, idxs := MessageRawDescriptor(m) - if rawDesc == nil || idxs == nil { - return nil, nil - } - fd := deriveFileDescriptor(rawDesc) - md := fd.MessageType[idxs[0]] - for _, i := range idxs[1:] { - md = md.NestedType[i] - } - return fd, md -} diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go index 81f54d5ef2ce6..b3283b8158837 100644 --- a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go +++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/client.go @@ -1,16 +1,28 @@ // Copyright 2022 Google LLC. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package client is a cross-platform client for the signer binary (a.k.a."EnterpriseCertSigner"). 
// -// Client is a cross-platform client for the signer binary (a.k.a."EnterpriseCertSigner"). // The signer binary is OS-specific, but exposes a standard set of APIs for the client to use. package client import ( "crypto" + "crypto/ecdsa" "crypto/rsa" "crypto/x509" "encoding/gob" + "errors" "fmt" "io" "net/rpc" @@ -67,14 +79,16 @@ func (k *Key) CertificateChain() [][]byte { // Close closes the RPC connection and kills the signer subprocess. // Call this to free up resources when the Key object is no longer needed. func (k *Key) Close() error { - if err := k.client.Close(); err != nil { - return fmt.Errorf("failed to close RPC connection: %w", err) - } if err := k.cmd.Process.Kill(); err != nil { return fmt.Errorf("failed to kill signer process: %w", err) } - if err := k.cmd.Wait(); err.Error() != "signal: killed" { - return fmt.Errorf("signer process was not killed: %w", err) + // Wait for cmd to exit and release resources. Since the process is forcefully killed, this + // will return a non-nil error (varies by OS), which we will ignore. + _ = k.cmd.Wait() + // The Pipes connecting the RPC client should have been closed when the signer subprocess was killed. + // Calling `k.client.Close()` before `k.cmd.Process.Kill()` or `k.cmd.Wait()` _will_ cause a segfault. + if err := k.client.Close(); err.Error() != "close |0: file already closed" { + return fmt.Errorf("failed to close RPC connection: %w", err) } return nil } @@ -84,12 +98,19 @@ func (k *Key) Public() crypto.PublicKey { return k.publicKey } -// Sign signs a message by encrypting a message digest, using the specified signer options. +// Sign signs a message digest, using the specified signer options. func (k *Key) Sign(_ io.Reader, digest []byte, opts crypto.SignerOpts) (signed []byte, err error) { + if opts != nil && opts.HashFunc() != 0 && len(digest) != opts.HashFunc().Size() { + return nil, fmt.Errorf("Digest length of %v bytes does not match Hash function size of %v bytes", len(digest), opts.HashFunc().Size()) + } err = k.client.Call(signAPI, SignArgs{Digest: digest, Opts: opts}, &signed) return } +// ErrCredUnavailable is a sentinel error that indicates ECP Cred is unavailable, +// possibly due to missing config or missing binary path. +var ErrCredUnavailable = errors.New("Cred is unavailable") + // Cred spawns a signer subprocess that listens on stdin/stdout to perform certificate // related operations, including signing messages with the private key. 
// @@ -103,6 +124,9 @@ func Cred(configFilePath string) (*Key, error) { } enterpriseCertSignerPath, err := util.LoadSignerBinaryPath(configFilePath) if err != nil { + if errors.Is(err, util.ErrConfigUnavailable) { + return nil, ErrCredUnavailable + } return nil, err } k := &Key{ @@ -147,5 +171,15 @@ func Cred(configFilePath string) (*Key, error) { return nil, fmt.Errorf("invalid public key type: %T", publicKey) } + switch pub := k.publicKey.(type) { + case *rsa.PublicKey: + if pub.Size() < 256 { + return nil, fmt.Errorf("RSA modulus size is less than 2048 bits: %v", pub.Size()*8) + } + case *ecdsa.PublicKey: + default: + return nil, fmt.Errorf("unsupported public key type: %v", pub) + } + return k, nil } diff --git a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go index 6b5f2806e6bf4..1640ec1c9e31a 100644 --- a/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go +++ b/vendor/github.com/googleapis/enterprise-certificate-proxy/client/util/util.go @@ -1,17 +1,30 @@ +// Copyright 2022 Google LLC. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + // Package util provides helper functions for the client. package util import ( "encoding/json" "errors" - "io/ioutil" + "io" "os" "os/user" "path/filepath" "runtime" ) -const configFileName = "enterprise_certificate_config.json" +const configFileName = "certificate_config.json" // EnterpriseCertificateConfig contains parameters for initializing signer. type EnterpriseCertificateConfig struct { @@ -20,17 +33,24 @@ type EnterpriseCertificateConfig struct { // Libs specifies the locations of helper libraries. type Libs struct { - SignerBinary string `json:"signer_binary"` + ECP string `json:"ecp"` } +// ErrConfigUnavailable is a sentinel error that indicates ECP config is unavailable, +// possibly due to entire config missing or missing binary path. +var ErrConfigUnavailable = errors.New("Config is unavailable") + // LoadSignerBinaryPath retrieves the path of the signer binary from the config file. 
func LoadSignerBinaryPath(configFilePath string) (path string, err error) { jsonFile, err := os.Open(configFilePath) if err != nil { + if errors.Is(err, os.ErrNotExist) { + return "", ErrConfigUnavailable + } return "", err } - byteValue, err := ioutil.ReadAll(jsonFile) + byteValue, err := io.ReadAll(jsonFile) if err != nil { return "", err } @@ -39,9 +59,9 @@ func LoadSignerBinaryPath(configFilePath string) (path string, err error) { if err != nil { return "", err } - signerBinaryPath := config.Libs.SignerBinary + signerBinaryPath := config.Libs.ECP if signerBinaryPath == "" { - return "", errors.New("Signer binary path is missing.") + return "", ErrConfigUnavailable } return signerBinaryPath, nil } @@ -61,9 +81,8 @@ func guessHomeDir() string { func getDefaultConfigFileDirectory() (directory string) { if runtime.GOOS == "windows" { return filepath.Join(os.Getenv("APPDATA"), "gcloud") - } else { - return filepath.Join(guessHomeDir(), ".config/gcloud") } + return filepath.Join(guessHomeDir(), ".config/gcloud") } // GetDefaultConfigFilePath returns the default path of the enterprise certificate config file created by gCloud. diff --git a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json index 0e643a05b571c..d88960b7ef17d 100644 --- a/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json +++ b/vendor/github.com/googleapis/gax-go/v2/.release-please-manifest.json @@ -1,3 +1,3 @@ { - "v2": "2.4.0" + "v2": "2.7.0" } diff --git a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md index b42ace44c9816..b75170f2227cc 100644 --- a/vendor/github.com/googleapis/gax-go/v2/CHANGES.md +++ b/vendor/github.com/googleapis/gax-go/v2/CHANGES.md @@ -1,5 +1,34 @@ # Changelog +## [2.7.0](https://github.com/googleapis/gax-go/compare/v2.6.0...v2.7.0) (2022-11-02) + + +### Features + +* update google.golang.org/api to latest ([#240](https://github.com/googleapis/gax-go/issues/240)) ([f690a02](https://github.com/googleapis/gax-go/commit/f690a02c806a2903bdee943ede3a58e3a331ebd6)) +* **v2/apierror:** add apierror.FromWrappingError ([#238](https://github.com/googleapis/gax-go/issues/238)) ([9dbd96d](https://github.com/googleapis/gax-go/commit/9dbd96d59b9d54ceb7c025513aa8c1a9d727382f)) + +## [2.6.0](https://github.com/googleapis/gax-go/compare/v2.5.1...v2.6.0) (2022-10-13) + + +### Features + +* **v2:** copy DetermineContentType functionality ([#230](https://github.com/googleapis/gax-go/issues/230)) ([2c52a70](https://github.com/googleapis/gax-go/commit/2c52a70bae965397f740ed27d46aabe89ff249b3)) + +## [2.5.1](https://github.com/googleapis/gax-go/compare/v2.5.0...v2.5.1) (2022-08-04) + + +### Bug Fixes + +* **v2:** resolve bad genproto pseudoversion in go.mod ([#218](https://github.com/googleapis/gax-go/issues/218)) ([1379b27](https://github.com/googleapis/gax-go/commit/1379b27e9846d959f7e1163b9ef298b3c92c8d23)) + +## [2.5.0](https://github.com/googleapis/gax-go/compare/v2.4.0...v2.5.0) (2022-08-04) + + +### Features + +* add ExtractProtoMessage to apierror ([#213](https://github.com/googleapis/gax-go/issues/213)) ([a6ce70c](https://github.com/googleapis/gax-go/commit/a6ce70c725c890533a9de6272d3b5ba2e336d6bb)) + ## [2.4.0](https://github.com/googleapis/gax-go/compare/v2.3.0...v2.4.0) (2022-05-09) diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go index 7d0128a0cd537..aa6be1304f176 
100644 --- a/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/apierror.go @@ -41,6 +41,7 @@ import ( "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc/status" "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" ) // ErrDetails holds the google/rpc/error_details.proto messages. @@ -60,6 +61,30 @@ type ErrDetails struct { Unknown []interface{} } +// ErrMessageNotFound is used to signal ExtractProtoMessage found no matching messages. +var ErrMessageNotFound = errors.New("message not found") + +// ExtractProtoMessage provides a mechanism for extracting protobuf messages from the +// Unknown error details. If ExtractProtoMessage finds an unknown message of the same type, +// the content of the message is copied to the provided message. +// +// ExtractProtoMessage will return ErrMessageNotFound if there are no message matching the +// protocol buffer type of the provided message. +func (e ErrDetails) ExtractProtoMessage(v proto.Message) error { + if v == nil { + return ErrMessageNotFound + } + for _, elem := range e.Unknown { + if elemProto, ok := elem.(proto.Message); ok { + if v.ProtoReflect().Type() == elemProto.ProtoReflect().Type() { + proto.Merge(v, elemProto) + return nil + } + } + } + return ErrMessageNotFound +} + func (e ErrDetails) String() string { var d strings.Builder if e.ErrorInfo != nil { @@ -208,30 +233,49 @@ func (a *APIError) Metadata() map[string]string { } -// FromError parses a Status error or a googleapi.Error and builds an APIError. -func FromError(err error) (*APIError, bool) { - if err == nil { - return nil, false - } - - ae := APIError{err: err} +// setDetailsFromError parses a Status error or a googleapi.Error +// and sets status and details or httpErr and details, respectively. +// It returns false if neither Status nor googleapi.Error can be parsed. +func (a *APIError) setDetailsFromError(err error) bool { st, isStatus := status.FromError(err) var herr *googleapi.Error isHTTPErr := errors.As(err, &herr) switch { case isStatus: - ae.status = st - ae.details = parseDetails(st.Details()) + a.status = st + a.details = parseDetails(st.Details()) case isHTTPErr: - ae.httpErr = herr - ae.details = parseHTTPDetails(herr) + a.httpErr = herr + a.details = parseHTTPDetails(herr) default: - return nil, false + return false } + return true +} - return &ae, true +// FromError parses a Status error or a googleapi.Error and builds an +// APIError, wrapping the provided error in the new APIError. It +// returns false if neither Status nor googleapi.Error can be parsed. +func FromError(err error) (*APIError, bool) { + return ParseError(err, true) +} +// ParseError parses a Status error or a googleapi.Error and builds an +// APIError. If wrap is true, it wraps the error in the new APIError. +// It returns false if neither Status nor googleapi.Error can be parsed. 
+func ParseError(err error, wrap bool) (*APIError, bool) { + if err == nil { + return nil, false + } + ae := APIError{} + if wrap { + ae = APIError{err: err} + } + if !ae.setDetailsFromError(err) { + return nil, false + } + return &ae, true } // parseDetails accepts a slice of interface{} that should be backed by some diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go new file mode 100644 index 0000000000000..e4b03f161d823 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.pb.go @@ -0,0 +1,256 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.0 +// protoc v3.17.3 +// source: custom_error.proto + +package jsonerror + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Error code for `CustomError`. +type CustomError_CustomErrorCode int32 + +const ( + // Default error. + CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED CustomError_CustomErrorCode = 0 + // Too many foo. + CustomError_TOO_MANY_FOO CustomError_CustomErrorCode = 1 + // Not enough foo. + CustomError_NOT_ENOUGH_FOO CustomError_CustomErrorCode = 2 + // Catastrophic error. + CustomError_UNIVERSE_WAS_DESTROYED CustomError_CustomErrorCode = 3 +) + +// Enum value maps for CustomError_CustomErrorCode. +var ( + CustomError_CustomErrorCode_name = map[int32]string{ + 0: "CUSTOM_ERROR_CODE_UNSPECIFIED", + 1: "TOO_MANY_FOO", + 2: "NOT_ENOUGH_FOO", + 3: "UNIVERSE_WAS_DESTROYED", + } + CustomError_CustomErrorCode_value = map[string]int32{ + "CUSTOM_ERROR_CODE_UNSPECIFIED": 0, + "TOO_MANY_FOO": 1, + "NOT_ENOUGH_FOO": 2, + "UNIVERSE_WAS_DESTROYED": 3, + } +) + +func (x CustomError_CustomErrorCode) Enum() *CustomError_CustomErrorCode { + p := new(CustomError_CustomErrorCode) + *p = x + return p +} + +func (x CustomError_CustomErrorCode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CustomError_CustomErrorCode) Descriptor() protoreflect.EnumDescriptor { + return file_custom_error_proto_enumTypes[0].Descriptor() +} + +func (CustomError_CustomErrorCode) Type() protoreflect.EnumType { + return &file_custom_error_proto_enumTypes[0] +} + +func (x CustomError_CustomErrorCode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CustomError_CustomErrorCode.Descriptor instead. 
+func (CustomError_CustomErrorCode) EnumDescriptor() ([]byte, []int) { + return file_custom_error_proto_rawDescGZIP(), []int{0, 0} +} + +// CustomError is an example of a custom error message which may be included +// in an rpc status. It is not meant to reflect a standard error. +type CustomError struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Error code specific to the custom API being invoked. + Code CustomError_CustomErrorCode `protobuf:"varint,1,opt,name=code,proto3,enum=error.CustomError_CustomErrorCode" json:"code,omitempty"` + // Name of the failed entity. + Entity string `protobuf:"bytes,2,opt,name=entity,proto3" json:"entity,omitempty"` + // Message that describes the error. + ErrorMessage string `protobuf:"bytes,3,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *CustomError) Reset() { + *x = CustomError{} + if protoimpl.UnsafeEnabled { + mi := &file_custom_error_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CustomError) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CustomError) ProtoMessage() {} + +func (x *CustomError) ProtoReflect() protoreflect.Message { + mi := &file_custom_error_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CustomError.ProtoReflect.Descriptor instead. +func (*CustomError) Descriptor() ([]byte, []int) { + return file_custom_error_proto_rawDescGZIP(), []int{0} +} + +func (x *CustomError) GetCode() CustomError_CustomErrorCode { + if x != nil { + return x.Code + } + return CustomError_CUSTOM_ERROR_CODE_UNSPECIFIED +} + +func (x *CustomError) GetEntity() string { + if x != nil { + return x.Entity + } + return "" +} + +func (x *CustomError) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +var File_custom_error_proto protoreflect.FileDescriptor + +var file_custom_error_proto_rawDesc = []byte{ + 0x0a, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xfa, 0x01, 0x0a, 0x0b, + 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x22, 0x76, 0x0a, 0x0f, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x43, + 0x6f, 0x64, 0x65, 0x12, 0x21, 0x0a, 0x1d, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x45, 0x52, + 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, + 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x54, 
0x4f, 0x4f, 0x5f, 0x4d, 0x41, + 0x4e, 0x59, 0x5f, 0x46, 0x4f, 0x4f, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4e, 0x4f, 0x54, 0x5f, + 0x45, 0x4e, 0x4f, 0x55, 0x47, 0x48, 0x5f, 0x46, 0x4f, 0x4f, 0x10, 0x02, 0x12, 0x1a, 0x0a, 0x16, + 0x55, 0x4e, 0x49, 0x56, 0x45, 0x52, 0x53, 0x45, 0x5f, 0x57, 0x41, 0x53, 0x5f, 0x44, 0x45, 0x53, + 0x54, 0x52, 0x4f, 0x59, 0x45, 0x44, 0x10, 0x03, 0x42, 0x43, 0x5a, 0x41, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2f, 0x67, 0x61, 0x78, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x32, 0x2f, 0x61, 0x70, 0x69, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x3b, 0x6a, 0x73, 0x6f, 0x6e, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_custom_error_proto_rawDescOnce sync.Once + file_custom_error_proto_rawDescData = file_custom_error_proto_rawDesc +) + +func file_custom_error_proto_rawDescGZIP() []byte { + file_custom_error_proto_rawDescOnce.Do(func() { + file_custom_error_proto_rawDescData = protoimpl.X.CompressGZIP(file_custom_error_proto_rawDescData) + }) + return file_custom_error_proto_rawDescData +} + +var file_custom_error_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_custom_error_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_custom_error_proto_goTypes = []interface{}{ + (CustomError_CustomErrorCode)(0), // 0: error.CustomError.CustomErrorCode + (*CustomError)(nil), // 1: error.CustomError +} +var file_custom_error_proto_depIdxs = []int32{ + 0, // 0: error.CustomError.code:type_name -> error.CustomError.CustomErrorCode + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_custom_error_proto_init() } +func file_custom_error_proto_init() { + if File_custom_error_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_custom_error_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CustomError); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_custom_error_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_custom_error_proto_goTypes, + DependencyIndexes: file_custom_error_proto_depIdxs, + EnumInfos: file_custom_error_proto_enumTypes, + MessageInfos: file_custom_error_proto_msgTypes, + }.Build() + File_custom_error_proto = out.File + file_custom_error_proto_rawDesc = nil + file_custom_error_proto_goTypes = nil + file_custom_error_proto_depIdxs = nil +} diff --git a/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto new file mode 100644 index 0000000000000..21678ae65c99e --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/apierror/internal/proto/custom_error.proto @@ -0,0 +1,50 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package error; + +option go_package = "github.com/googleapis/gax-go/v2/apierror/internal/proto;jsonerror"; + + +// CustomError is an example of a custom error message which may be included +// in an rpc status. It is not meant to reflect a standard error. +message CustomError { + + // Error code for `CustomError`. + enum CustomErrorCode { + // Default error. + CUSTOM_ERROR_CODE_UNSPECIFIED = 0; + + // Too many foo. + TOO_MANY_FOO = 1; + + // Not enough foo. + NOT_ENOUGH_FOO = 2; + + // Catastrophic error. + UNIVERSE_WAS_DESTROYED = 3; + + } + + // Error code specific to the custom API being invoked. + CustomErrorCode code = 1; + + // Name of the failed entity. + string entity = 2; + + // Message that describes the error. + string error_message = 3; +} diff --git a/vendor/github.com/googleapis/gax-go/v2/content_type.go b/vendor/github.com/googleapis/gax-go/v2/content_type.go new file mode 100644 index 0000000000000..1b53d0a3ac1a8 --- /dev/null +++ b/vendor/github.com/googleapis/gax-go/v2/content_type.go @@ -0,0 +1,112 @@ +// Copyright 2022, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package gax + +import ( + "io" + "io/ioutil" + "net/http" +) + +const sniffBuffSize = 512 + +func newContentSniffer(r io.Reader) *contentSniffer { + return &contentSniffer{r: r} +} + +// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader. +type contentSniffer struct { + r io.Reader + start []byte // buffer for the sniffed bytes. + err error // set to any error encountered while reading bytes to be sniffed. 
+ + ctype string // set on first sniff. + sniffed bool // set to true on first sniff. +} + +func (cs *contentSniffer) Read(p []byte) (n int, err error) { + // Ensure that the content type is sniffed before any data is consumed from Reader. + _, _ = cs.ContentType() + + if len(cs.start) > 0 { + n := copy(p, cs.start) + cs.start = cs.start[n:] + return n, nil + } + + // We may have read some bytes into start while sniffing, even if the read ended in an error. + // We should first return those bytes, then the error. + if cs.err != nil { + return 0, cs.err + } + + // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader. + return cs.r.Read(p) +} + +// ContentType returns the sniffed content type, and whether the content type was successfully sniffed. +func (cs *contentSniffer) ContentType() (string, bool) { + if cs.sniffed { + return cs.ctype, cs.ctype != "" + } + cs.sniffed = true + // If ReadAll hits EOF, it returns err==nil. + cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize)) + + // Don't try to detect the content type based on possibly incomplete data. + if cs.err != nil { + return "", false + } + + cs.ctype = http.DetectContentType(cs.start) + return cs.ctype, true +} + +// DetermineContentType determines the content type of the supplied reader. +// The content of media will be sniffed to determine the content type. +// After calling DetectContentType the caller must not perform further reads on +// media, but rather read from the Reader that is returned. +func DetermineContentType(media io.Reader) (io.Reader, string) { + // For backwards compatibility, allow clients to set content + // type by providing a ContentTyper for media. + // Note: This is an anonymous interface definition copied from googleapi.ContentTyper. + if typer, ok := media.(interface { + ContentType() string + }); ok { + return media, typer.ContentType() + } + + sniffer := newContentSniffer(media) + if ctype, ok := sniffer.ContentType(); ok { + return sniffer, ctype + } + // If content type could not be sniffed, reads from sniffer will eventually fail with an error. + return sniffer, "" +} diff --git a/vendor/github.com/googleapis/gax-go/v2/internal/version.go b/vendor/github.com/googleapis/gax-go/v2/internal/version.go index bf272a5045c26..0ba5da1dd1eaf 100644 --- a/vendor/github.com/googleapis/gax-go/v2/internal/version.go +++ b/vendor/github.com/googleapis/gax-go/v2/internal/version.go @@ -30,4 +30,4 @@ package internal // Version is the current tagged release of the library. 
-const Version = "2.4.0" +const Version = "2.7.0" diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel deleted file mode 100644 index 5242751fb2d5a..0000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel +++ /dev/null @@ -1,23 +0,0 @@ -load("@rules_proto//proto:defs.bzl", "proto_library") -load("@io_bazel_rules_go//go:def.bzl", "go_library") -load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") - -package(default_visibility = ["//visibility:public"]) - -proto_library( - name = "internal_proto", - srcs = ["errors.proto"], - deps = ["@com_google_protobuf//:any_proto"], -) - -go_proto_library( - name = "internal_go_proto", - importpath = "github.com/grpc-ecosystem/grpc-gateway/internal", - proto = ":internal_proto", -) - -go_library( - name = "go_default_library", - embed = [":internal_go_proto"], - importpath = "github.com/grpc-ecosystem/grpc-gateway/internal", -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go deleted file mode 100644 index 61101d7177f05..0000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go +++ /dev/null @@ -1,189 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: internal/errors.proto - -package internal - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - any "github.com/golang/protobuf/ptypes/any" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// Error is the generic error returned from unary RPCs. -type Error struct { - Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` - // This is to make the error more compatible with users that expect errors to be Status objects: - // https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto - // It should be the exact same message as the Error field. 
- Code int32 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"` - Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` - Details []*any.Any `protobuf:"bytes,4,rep,name=details,proto3" json:"details,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Error) Reset() { *m = Error{} } -func (m *Error) String() string { return proto.CompactTextString(m) } -func (*Error) ProtoMessage() {} -func (*Error) Descriptor() ([]byte, []int) { - return fileDescriptor_9b093362ca6d1e03, []int{0} -} - -func (m *Error) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Error.Unmarshal(m, b) -} -func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Error.Marshal(b, m, deterministic) -} -func (m *Error) XXX_Merge(src proto.Message) { - xxx_messageInfo_Error.Merge(m, src) -} -func (m *Error) XXX_Size() int { - return xxx_messageInfo_Error.Size(m) -} -func (m *Error) XXX_DiscardUnknown() { - xxx_messageInfo_Error.DiscardUnknown(m) -} - -var xxx_messageInfo_Error proto.InternalMessageInfo - -func (m *Error) GetError() string { - if m != nil { - return m.Error - } - return "" -} - -func (m *Error) GetCode() int32 { - if m != nil { - return m.Code - } - return 0 -} - -func (m *Error) GetMessage() string { - if m != nil { - return m.Message - } - return "" -} - -func (m *Error) GetDetails() []*any.Any { - if m != nil { - return m.Details - } - return nil -} - -// StreamError is a response type which is returned when -// streaming rpc returns an error. -type StreamError struct { - GrpcCode int32 `protobuf:"varint,1,opt,name=grpc_code,json=grpcCode,proto3" json:"grpc_code,omitempty"` - HttpCode int32 `protobuf:"varint,2,opt,name=http_code,json=httpCode,proto3" json:"http_code,omitempty"` - Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` - HttpStatus string `protobuf:"bytes,4,opt,name=http_status,json=httpStatus,proto3" json:"http_status,omitempty"` - Details []*any.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StreamError) Reset() { *m = StreamError{} } -func (m *StreamError) String() string { return proto.CompactTextString(m) } -func (*StreamError) ProtoMessage() {} -func (*StreamError) Descriptor() ([]byte, []int) { - return fileDescriptor_9b093362ca6d1e03, []int{1} -} - -func (m *StreamError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StreamError.Unmarshal(m, b) -} -func (m *StreamError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StreamError.Marshal(b, m, deterministic) -} -func (m *StreamError) XXX_Merge(src proto.Message) { - xxx_messageInfo_StreamError.Merge(m, src) -} -func (m *StreamError) XXX_Size() int { - return xxx_messageInfo_StreamError.Size(m) -} -func (m *StreamError) XXX_DiscardUnknown() { - xxx_messageInfo_StreamError.DiscardUnknown(m) -} - -var xxx_messageInfo_StreamError proto.InternalMessageInfo - -func (m *StreamError) GetGrpcCode() int32 { - if m != nil { - return m.GrpcCode - } - return 0 -} - -func (m *StreamError) GetHttpCode() int32 { - if m != nil { - return m.HttpCode - } - return 0 -} - -func (m *StreamError) GetMessage() string { - if m != nil { - return m.Message - } - return "" -} - -func (m *StreamError) GetHttpStatus() string { - if m != nil { - return 
m.HttpStatus - } - return "" -} - -func (m *StreamError) GetDetails() []*any.Any { - if m != nil { - return m.Details - } - return nil -} - -func init() { - proto.RegisterType((*Error)(nil), "grpc.gateway.runtime.Error") - proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError") -} - -func init() { proto.RegisterFile("internal/errors.proto", fileDescriptor_9b093362ca6d1e03) } - -var fileDescriptor_9b093362ca6d1e03 = []byte{ - // 252 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x90, 0xc1, 0x4a, 0xc4, 0x30, - 0x10, 0x86, 0x89, 0xbb, 0x75, 0xdb, 0xe9, 0x2d, 0x54, 0x88, 0xee, 0xc1, 0xb2, 0xa7, 0x9e, 0x52, - 0xd0, 0x27, 0xd0, 0xc5, 0x17, 0xe8, 0xde, 0xbc, 0x2c, 0xd9, 0xdd, 0x31, 0x16, 0xda, 0xa4, 0x24, - 0x53, 0xa4, 0xf8, 0x56, 0x3e, 0xa1, 0x24, 0xa5, 0xb0, 0x27, 0xf1, 0xd6, 0xf9, 0xfb, 0xcf, 0x7c, - 0x1f, 0x81, 0xbb, 0xd6, 0x10, 0x3a, 0xa3, 0xba, 0x1a, 0x9d, 0xb3, 0xce, 0xcb, 0xc1, 0x59, 0xb2, - 0xbc, 0xd0, 0x6e, 0x38, 0x4b, 0xad, 0x08, 0xbf, 0xd4, 0x24, 0xdd, 0x68, 0xa8, 0xed, 0xf1, 0xe1, - 0x5e, 0x5b, 0xab, 0x3b, 0xac, 0x63, 0xe7, 0x34, 0x7e, 0xd4, 0xca, 0x4c, 0xf3, 0xc2, 0xee, 0x1b, - 0x92, 0xb7, 0x70, 0x80, 0x17, 0x90, 0xc4, 0x4b, 0x82, 0x95, 0xac, 0xca, 0x9a, 0x79, 0xe0, 0x1c, - 0xd6, 0x67, 0x7b, 0x41, 0x71, 0x53, 0xb2, 0x2a, 0x69, 0xe2, 0x37, 0x17, 0xb0, 0xe9, 0xd1, 0x7b, - 0xa5, 0x51, 0xac, 0x62, 0x77, 0x19, 0xb9, 0x84, 0xcd, 0x05, 0x49, 0xb5, 0x9d, 0x17, 0xeb, 0x72, - 0x55, 0xe5, 0x4f, 0x85, 0x9c, 0xc9, 0x72, 0x21, 0xcb, 0x17, 0x33, 0x35, 0x4b, 0x69, 0xf7, 0xc3, - 0x20, 0x3f, 0x90, 0x43, 0xd5, 0xcf, 0x0e, 0x5b, 0xc8, 0x82, 0xff, 0x31, 0x22, 0x59, 0x44, 0xa6, - 0x21, 0xd8, 0x07, 0xec, 0x16, 0xb2, 0x4f, 0xa2, 0xe1, 0x78, 0xe5, 0x93, 0x86, 0x60, 0xff, 0xb7, - 0xd3, 0x23, 0xe4, 0x71, 0xcd, 0x93, 0xa2, 0x31, 0x78, 0x85, 0xbf, 0x10, 0xa2, 0x43, 0x4c, 0xae, - 0xa5, 0x93, 0x7f, 0x48, 0xbf, 0xc2, 0x7b, 0xba, 0xbc, 0xfd, 0xe9, 0x36, 0x56, 0x9e, 0x7f, 0x03, - 0x00, 0x00, 0xff, 0xff, 0xde, 0x72, 0x6b, 0x83, 0x8e, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.proto deleted file mode 100644 index 4fb212c6b690d..0000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.proto +++ /dev/null @@ -1,26 +0,0 @@ -syntax = "proto3"; -package grpc.gateway.runtime; -option go_package = "internal"; - -import "google/protobuf/any.proto"; - -// Error is the generic error returned from unary RPCs. -message Error { - string error = 1; - // This is to make the error more compatible with users that expect errors to be Status objects: - // https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto - // It should be the exact same message as the Error field. - int32 code = 2; - string message = 3; - repeated google.protobuf.Any details = 4; -} - -// StreamError is a response type which is returned when -// streaming rpc returns an error. 
-message StreamError { - int32 grpc_code = 1; - int32 http_code = 2; - string message = 3; - string http_status = 4; - repeated google.protobuf.Any details = 5; -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel deleted file mode 100644 index 58b72b9cf7513..0000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel +++ /dev/null @@ -1,85 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -package(default_visibility = ["//visibility:public"]) - -go_library( - name = "go_default_library", - srcs = [ - "context.go", - "convert.go", - "doc.go", - "errors.go", - "fieldmask.go", - "handler.go", - "marshal_httpbodyproto.go", - "marshal_json.go", - "marshal_jsonpb.go", - "marshal_proto.go", - "marshaler.go", - "marshaler_registry.go", - "mux.go", - "pattern.go", - "proto2_convert.go", - "proto_errors.go", - "query.go", - ], - importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime", - deps = [ - "//internal:go_default_library", - "//utilities:go_default_library", - "@com_github_golang_protobuf//descriptor:go_default_library_gen", - "@com_github_golang_protobuf//jsonpb:go_default_library_gen", - "@com_github_golang_protobuf//proto:go_default_library", - "@go_googleapis//google/api:httpbody_go_proto", - "@io_bazel_rules_go//proto/wkt:any_go_proto", - "@io_bazel_rules_go//proto/wkt:descriptor_go_proto", - "@io_bazel_rules_go//proto/wkt:duration_go_proto", - "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", - "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", - "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", - "@org_golang_google_grpc//codes:go_default_library", - "@org_golang_google_grpc//grpclog:go_default_library", - "@org_golang_google_grpc//metadata:go_default_library", - "@org_golang_google_grpc//status:go_default_library", - ], -) - -go_test( - name = "go_default_test", - size = "small", - srcs = [ - "context_test.go", - "convert_test.go", - "errors_test.go", - "fieldmask_test.go", - "handler_test.go", - "marshal_httpbodyproto_test.go", - "marshal_json_test.go", - "marshal_jsonpb_test.go", - "marshal_proto_test.go", - "marshaler_registry_test.go", - "mux_test.go", - "pattern_test.go", - "query_test.go", - ], - embed = [":go_default_library"], - deps = [ - "//internal:go_default_library", - "//runtime/internal/examplepb:go_default_library", - "//utilities:go_default_library", - "@com_github_golang_protobuf//jsonpb:go_default_library_gen", - "@com_github_golang_protobuf//proto:go_default_library", - "@com_github_golang_protobuf//ptypes:go_default_library_gen", - "@go_googleapis//google/api:httpbody_go_proto", - "@go_googleapis//google/rpc:errdetails_go_proto", - "@io_bazel_rules_go//proto/wkt:duration_go_proto", - "@io_bazel_rules_go//proto/wkt:empty_go_proto", - "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", - "@io_bazel_rules_go//proto/wkt:struct_go_proto", - "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", - "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", - "@org_golang_google_grpc//codes:go_default_library", - "@org_golang_google_grpc//metadata:go_default_library", - "@org_golang_google_grpc//status:go_default_library", - ], -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go deleted file mode 100644 index b2ce743bddcd6..0000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go +++ /dev/null @@ -1,186 
+0,0 @@ -package runtime - -import ( - "context" - "io" - "net/http" - "strings" - - "github.com/grpc-ecosystem/grpc-gateway/internal" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status. -// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto -func HTTPStatusFromCode(code codes.Code) int { - switch code { - case codes.OK: - return http.StatusOK - case codes.Canceled: - return http.StatusRequestTimeout - case codes.Unknown: - return http.StatusInternalServerError - case codes.InvalidArgument: - return http.StatusBadRequest - case codes.DeadlineExceeded: - return http.StatusGatewayTimeout - case codes.NotFound: - return http.StatusNotFound - case codes.AlreadyExists: - return http.StatusConflict - case codes.PermissionDenied: - return http.StatusForbidden - case codes.Unauthenticated: - return http.StatusUnauthorized - case codes.ResourceExhausted: - return http.StatusTooManyRequests - case codes.FailedPrecondition: - // Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status. - return http.StatusBadRequest - case codes.Aborted: - return http.StatusConflict - case codes.OutOfRange: - return http.StatusBadRequest - case codes.Unimplemented: - return http.StatusNotImplemented - case codes.Internal: - return http.StatusInternalServerError - case codes.Unavailable: - return http.StatusServiceUnavailable - case codes.DataLoss: - return http.StatusInternalServerError - } - - grpclog.Infof("Unknown gRPC error code: %v", code) - return http.StatusInternalServerError -} - -var ( - // HTTPError replies to the request with an error. - // - // HTTPError is called: - // - From generated per-endpoint gateway handler code, when calling the backend results in an error. - // - From gateway runtime code, when forwarding the response message results in an error. - // - // The default value for HTTPError calls the custom error handler configured on the ServeMux via the - // WithProtoErrorHandler serve option if that option was used, calling GlobalHTTPErrorHandler otherwise. - // - // To customize the error handling of a particular ServeMux instance, use the WithProtoErrorHandler - // serve option. - // - // To customize the error format for all ServeMux instances not using the WithProtoErrorHandler serve - // option, set GlobalHTTPErrorHandler to a custom function. - // - // Setting this variable directly to customize error format is deprecated. - HTTPError = MuxOrGlobalHTTPError - - // GlobalHTTPErrorHandler is the HTTPError handler for all ServeMux instances not using the - // WithProtoErrorHandler serve option. - // - // You can set a custom function to this variable to customize error format. - GlobalHTTPErrorHandler = DefaultHTTPError - - // OtherErrorHandler handles gateway errors from parsing and routing client requests for all - // ServeMux instances not using the WithProtoErrorHandler serve option. - // - // It returns the following error codes: StatusMethodNotAllowed StatusNotFound StatusBadRequest - // - // To customize parsing and routing error handling of a particular ServeMux instance, use the - // WithProtoErrorHandler serve option. - // - // To customize parsing and routing error handling of all ServeMux instances not using the - // WithProtoErrorHandler serve option, set a custom function to this variable. 
- OtherErrorHandler = DefaultOtherErrorHandler -) - -// MuxOrGlobalHTTPError uses the mux-configured error handler, falling back to GlobalErrorHandler. -func MuxOrGlobalHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { - if mux.protoErrorHandler != nil { - mux.protoErrorHandler(ctx, mux, marshaler, w, r, err) - } else { - GlobalHTTPErrorHandler(ctx, mux, marshaler, w, r, err) - } -} - -// DefaultHTTPError is the default implementation of HTTPError. -// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode. -// If otherwise, it replies with http.StatusInternalServerError. -// -// The response body returned by this function is a JSON object, -// which contains a member whose key is "error" and whose value is err.Error(). -func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { - const fallback = `{"error": "failed to marshal error message"}` - - s, ok := status.FromError(err) - if !ok { - s = status.New(codes.Unknown, err.Error()) - } - - w.Header().Del("Trailer") - w.Header().Del("Transfer-Encoding") - - contentType := marshaler.ContentType() - // Check marshaler on run time in order to keep backwards compatibility - // An interface param needs to be added to the ContentType() function on - // the Marshal interface to be able to remove this check - if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok { - pb := s.Proto() - contentType = typeMarshaler.ContentTypeFromMessage(pb) - } - w.Header().Set("Content-Type", contentType) - - body := &internal.Error{ - Error: s.Message(), - Message: s.Message(), - Code: int32(s.Code()), - Details: s.Proto().GetDetails(), - } - - buf, merr := marshaler.Marshal(body) - if merr != nil { - grpclog.Infof("Failed to marshal error message %q: %v", body, merr) - w.WriteHeader(http.StatusInternalServerError) - if _, err := io.WriteString(w, fallback); err != nil { - grpclog.Infof("Failed to write response: %v", err) - } - return - } - - md, ok := ServerMetadataFromContext(ctx) - if !ok { - grpclog.Infof("Failed to extract ServerMetadata from context") - } - - handleForwardResponseServerMetadata(w, mux, md) - - // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2 - // Unless the request includes a TE header field indicating "trailers" - // is acceptable, as described in Section 4.3, a server SHOULD NOT - // generate trailer fields that it believes are necessary for the user - // agent to receive. - var wantsTrailers bool - - if te := r.Header.Get("TE"); strings.Contains(strings.ToLower(te), "trailers") { - wantsTrailers = true - handleForwardResponseTrailerHeader(w, md) - w.Header().Set("Transfer-Encoding", "chunked") - } - - st := HTTPStatusFromCode(s.Code()) - w.WriteHeader(st) - if _, err := w.Write(buf); err != nil { - grpclog.Infof("Failed to write response: %v", err) - } - - if wantsTrailers { - handleForwardResponseTrailer(w, md) - } -} - -// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler. -// It simply writes a string representation of the given error into "w". 
-func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) { - http.Error(w, msg, code) -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go deleted file mode 100644 index aef645e40b92d..0000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go +++ /dev/null @@ -1,89 +0,0 @@ -package runtime - -import ( - "encoding/json" - "io" - "strings" - - descriptor2 "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/protoc-gen-go/descriptor" - "google.golang.org/genproto/protobuf/field_mask" -) - -func translateName(name string, md *descriptor.DescriptorProto) (string, *descriptor.DescriptorProto) { - // TODO - should really gate this with a test that the marshaller has used json names - if md != nil { - for _, f := range md.Field { - if f.JsonName != nil && f.Name != nil && *f.JsonName == name { - var subType *descriptor.DescriptorProto - - // If the field has a TypeName then we retrieve the nested type for translating the embedded message names. - if f.TypeName != nil { - typeSplit := strings.Split(*f.TypeName, ".") - typeName := typeSplit[len(typeSplit)-1] - for _, t := range md.NestedType { - if typeName == *t.Name { - subType = t - } - } - } - return *f.Name, subType - } - } - } - return name, nil -} - -// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body. -func FieldMaskFromRequestBody(r io.Reader, md *descriptor.DescriptorProto) (*field_mask.FieldMask, error) { - fm := &field_mask.FieldMask{} - var root interface{} - if err := json.NewDecoder(r).Decode(&root); err != nil { - if err == io.EOF { - return fm, nil - } - return nil, err - } - - queue := []fieldMaskPathItem{{node: root, md: md}} - for len(queue) > 0 { - // dequeue an item - item := queue[0] - queue = queue[1:] - - if m, ok := item.node.(map[string]interface{}); ok { - // if the item is an object, then enqueue all of its children - for k, v := range m { - protoName, subMd := translateName(k, item.md) - if subMsg, ok := v.(descriptor2.Message); ok { - _, subMd = descriptor2.ForMessage(subMsg) - } - - var path string - if item.path == "" { - path = protoName - } else { - path = item.path + "." 
+ protoName - } - queue = append(queue, fieldMaskPathItem{path: path, node: v, md: subMd}) - } - } else if len(item.path) > 0 { - // otherwise, it's a leaf node so print its path - fm.Paths = append(fm.Paths, item.path) - } - } - - return fm, nil -} - -// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask -type fieldMaskPathItem struct { - // the list of prior fields leading up to node connected by dots - path string - - // a generic decoded json object the current item to inspect for further path extraction - node interface{} - - // descriptor for parent message - md *descriptor.DescriptorProto -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go deleted file mode 100644 index 3fd30da22a70d..0000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go +++ /dev/null @@ -1,106 +0,0 @@ -package runtime - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/ptypes/any" - "github.com/grpc-ecosystem/grpc-gateway/internal" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/status" -) - -// StreamErrorHandlerFunc accepts an error as a gRPC error generated via status package and translates it into a -// a proto struct used to represent error at the end of a stream. -type StreamErrorHandlerFunc func(context.Context, error) *StreamError - -// StreamError is the payload for the final message in a server stream in the event that the server returns an -// error after a response message has already been sent. -type StreamError internal.StreamError - -// ProtoErrorHandlerFunc handles the error as a gRPC error generated via status package and replies to the request. -type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error) - -var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler - -// DefaultHTTPProtoErrorHandler is an implementation of HTTPError. -// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode. -// If otherwise, it replies with http.StatusInternalServerError. -// -// The response body returned by this function is a Status message marshaled by a Marshaler. -// -// Do not set this function to HTTPError variable directly, use WithProtoErrorHandler option instead. 
-func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { - // return Internal when Marshal failed - const fallback = `{"code": 13, "message": "failed to marshal error message"}` - - s, ok := status.FromError(err) - if !ok { - s = status.New(codes.Unknown, err.Error()) - } - - w.Header().Del("Trailer") - - contentType := marshaler.ContentType() - // Check marshaler on run time in order to keep backwards compatibility - // An interface param needs to be added to the ContentType() function on - // the Marshal interface to be able to remove this check - if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok { - pb := s.Proto() - contentType = typeMarshaler.ContentTypeFromMessage(pb) - } - w.Header().Set("Content-Type", contentType) - - buf, merr := marshaler.Marshal(s.Proto()) - if merr != nil { - grpclog.Infof("Failed to marshal error message %q: %v", s.Proto(), merr) - w.WriteHeader(http.StatusInternalServerError) - if _, err := io.WriteString(w, fallback); err != nil { - grpclog.Infof("Failed to write response: %v", err) - } - return - } - - md, ok := ServerMetadataFromContext(ctx) - if !ok { - grpclog.Infof("Failed to extract ServerMetadata from context") - } - - handleForwardResponseServerMetadata(w, mux, md) - handleForwardResponseTrailerHeader(w, md) - st := HTTPStatusFromCode(s.Code()) - w.WriteHeader(st) - if _, err := w.Write(buf); err != nil { - grpclog.Infof("Failed to write response: %v", err) - } - - handleForwardResponseTrailer(w, md) -} - -// DefaultHTTPStreamErrorHandler converts the given err into a *StreamError via -// default logic. -// -// It extracts the gRPC status from err if possible. The fields of the status are -// used to populate the returned StreamError, and the HTTP status code is derived -// from the gRPC code via HTTPStatusFromCode. If the given err does not contain a -// gRPC status, an "Unknown" gRPC code is used and "Internal Server Error" HTTP code. 
-func DefaultHTTPStreamErrorHandler(_ context.Context, err error) *StreamError { - grpcCode := codes.Unknown - grpcMessage := err.Error() - var grpcDetails []*any.Any - if s, ok := status.FromError(err); ok { - grpcCode = s.Code() - grpcMessage = s.Message() - grpcDetails = s.Proto().GetDetails() - } - httpCode := HTTPStatusFromCode(grpcCode) - return &StreamError{ - GrpcCode: int32(grpcCode), - HttpCode: int32(httpCode), - Message: grpcMessage, - HttpStatus: http.StatusText(httpCode), - Details: grpcDetails, - } -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go deleted file mode 100644 index ba66842c33090..0000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go +++ /dev/null @@ -1,406 +0,0 @@ -package runtime - -import ( - "encoding/base64" - "fmt" - "net/url" - "reflect" - "regexp" - "strconv" - "strings" - "time" - - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc/grpclog" -) - -var valuesKeyRegexp = regexp.MustCompile("^(.*)\\[(.*)\\]$") - -var currentQueryParser QueryParameterParser = &defaultQueryParser{} - -// QueryParameterParser defines interface for all query parameter parsers -type QueryParameterParser interface { - Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error -} - -// PopulateQueryParameters parses query parameters -// into "msg" using current query parser -func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { - return currentQueryParser.Parse(msg, values, filter) -} - -type defaultQueryParser struct{} - -// Parse populates "values" into "msg". -// A value is ignored if its key starts with one of the elements in "filter". -func (*defaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { - for key, values := range values { - match := valuesKeyRegexp.FindStringSubmatch(key) - if len(match) == 3 { - key = match[1] - values = append([]string{match[2]}, values...) - } - fieldPath := strings.Split(key, ".") - if filter.HasCommonPrefix(fieldPath) { - continue - } - if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil { - return err - } - } - return nil -} - -// PopulateFieldFromPath sets a value in a nested Protobuf structure. -// It instantiates missing protobuf fields as it goes. 
-func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error { - fieldPath := strings.Split(fieldPathString, ".") - return populateFieldValueFromPath(msg, fieldPath, []string{value}) -} - -func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error { - m := reflect.ValueOf(msg) - if m.Kind() != reflect.Ptr { - return fmt.Errorf("unexpected type %T: %v", msg, msg) - } - var props *proto.Properties - m = m.Elem() - for i, fieldName := range fieldPath { - isLast := i == len(fieldPath)-1 - if !isLast && m.Kind() != reflect.Struct { - return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, ".")) - } - var f reflect.Value - var err error - f, props, err = fieldByProtoName(m, fieldName) - if err != nil { - return err - } else if !f.IsValid() { - grpclog.Infof("field not found in %T: %s", msg, strings.Join(fieldPath, ".")) - return nil - } - - switch f.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64: - if !isLast { - return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], ".")) - } - m = f - case reflect.Slice: - if !isLast { - return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, ".")) - } - // Handle []byte - if f.Type().Elem().Kind() == reflect.Uint8 { - m = f - break - } - return populateRepeatedField(f, values, props) - case reflect.Ptr: - if f.IsNil() { - m = reflect.New(f.Type().Elem()) - f.Set(m.Convert(f.Type())) - } - m = f.Elem() - continue - case reflect.Struct: - m = f - continue - case reflect.Map: - if !isLast { - return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], ".")) - } - return populateMapField(f, values, props) - default: - return fmt.Errorf("unexpected type %s in %T", f.Type(), msg) - } - } - switch len(values) { - case 0: - return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, ".")) - case 1: - default: - grpclog.Infof("too many field values: %s", strings.Join(fieldPath, ".")) - } - return populateField(m, values[0], props) -} - -// fieldByProtoName looks up a field whose corresponding protobuf field name is "name". -// "m" must be a struct value. It returns zero reflect.Value if no such field found. 
-func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) { - props := proto.GetProperties(m.Type()) - - // look up field name in oneof map - for _, op := range props.OneofTypes { - if name == op.Prop.OrigName || name == op.Prop.JSONName { - v := reflect.New(op.Type.Elem()) - field := m.Field(op.Field) - if !field.IsNil() { - return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName) - } - field.Set(v) - return v.Elem().Field(0), op.Prop, nil - } - } - - for _, p := range props.Prop { - if p.OrigName == name { - return m.FieldByName(p.Name), p, nil - } - if p.JSONName == name { - return m.FieldByName(p.Name), p, nil - } - } - return reflect.Value{}, nil, nil -} - -func populateMapField(f reflect.Value, values []string, props *proto.Properties) error { - if len(values) != 2 { - return fmt.Errorf("more than one value provided for key %s in map %s", values[0], props.Name) - } - - key, value := values[0], values[1] - keyType := f.Type().Key() - valueType := f.Type().Elem() - if f.IsNil() { - f.Set(reflect.MakeMap(f.Type())) - } - - keyConv, ok := convFromType[keyType.Kind()] - if !ok { - return fmt.Errorf("unsupported key type %s in map %s", keyType, props.Name) - } - valueConv, ok := convFromType[valueType.Kind()] - if !ok { - return fmt.Errorf("unsupported value type %s in map %s", valueType, props.Name) - } - - keyV := keyConv.Call([]reflect.Value{reflect.ValueOf(key)}) - if err := keyV[1].Interface(); err != nil { - return err.(error) - } - valueV := valueConv.Call([]reflect.Value{reflect.ValueOf(value)}) - if err := valueV[1].Interface(); err != nil { - return err.(error) - } - - f.SetMapIndex(keyV[0].Convert(keyType), valueV[0].Convert(valueType)) - - return nil -} - -func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error { - elemType := f.Type().Elem() - - // is the destination field a slice of an enumeration type? - if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { - return populateFieldEnumRepeated(f, values, enumValMap) - } - - conv, ok := convFromType[elemType.Kind()] - if !ok { - return fmt.Errorf("unsupported field type %s", elemType) - } - f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) - for i, v := range values { - result := conv.Call([]reflect.Value{reflect.ValueOf(v)}) - if err := result[1].Interface(); err != nil { - return err.(error) - } - f.Index(i).Set(result[0].Convert(f.Index(i).Type())) - } - return nil -} - -func populateField(f reflect.Value, value string, props *proto.Properties) error { - i := f.Addr().Interface() - - // Handle protobuf well known types - var name string - switch m := i.(type) { - case interface{ XXX_WellKnownType() string }: - name = m.XXX_WellKnownType() - case proto.Message: - const wktPrefix = "google.protobuf." 
- if fullName := proto.MessageName(m); strings.HasPrefix(fullName, wktPrefix) { - name = fullName[len(wktPrefix):] - } - } - switch name { - case "Timestamp": - if value == "null" { - f.FieldByName("Seconds").SetInt(0) - f.FieldByName("Nanos").SetInt(0) - return nil - } - - t, err := time.Parse(time.RFC3339Nano, value) - if err != nil { - return fmt.Errorf("bad Timestamp: %v", err) - } - f.FieldByName("Seconds").SetInt(int64(t.Unix())) - f.FieldByName("Nanos").SetInt(int64(t.Nanosecond())) - return nil - case "Duration": - if value == "null" { - f.FieldByName("Seconds").SetInt(0) - f.FieldByName("Nanos").SetInt(0) - return nil - } - d, err := time.ParseDuration(value) - if err != nil { - return fmt.Errorf("bad Duration: %v", err) - } - - ns := d.Nanoseconds() - s := ns / 1e9 - ns %= 1e9 - f.FieldByName("Seconds").SetInt(s) - f.FieldByName("Nanos").SetInt(ns) - return nil - case "DoubleValue": - fallthrough - case "FloatValue": - float64Val, err := strconv.ParseFloat(value, 64) - if err != nil { - return fmt.Errorf("bad DoubleValue: %s", value) - } - f.FieldByName("Value").SetFloat(float64Val) - return nil - case "Int64Value": - fallthrough - case "Int32Value": - int64Val, err := strconv.ParseInt(value, 10, 64) - if err != nil { - return fmt.Errorf("bad DoubleValue: %s", value) - } - f.FieldByName("Value").SetInt(int64Val) - return nil - case "UInt64Value": - fallthrough - case "UInt32Value": - uint64Val, err := strconv.ParseUint(value, 10, 64) - if err != nil { - return fmt.Errorf("bad DoubleValue: %s", value) - } - f.FieldByName("Value").SetUint(uint64Val) - return nil - case "BoolValue": - if value == "true" { - f.FieldByName("Value").SetBool(true) - } else if value == "false" { - f.FieldByName("Value").SetBool(false) - } else { - return fmt.Errorf("bad BoolValue: %s", value) - } - return nil - case "StringValue": - f.FieldByName("Value").SetString(value) - return nil - case "BytesValue": - bytesVal, err := base64.StdEncoding.DecodeString(value) - if err != nil { - return fmt.Errorf("bad BytesValue: %s", value) - } - f.FieldByName("Value").SetBytes(bytesVal) - return nil - case "FieldMask": - p := f.FieldByName("Paths") - for _, v := range strings.Split(value, ",") { - if v != "" { - p.Set(reflect.Append(p, reflect.ValueOf(v))) - } - } - return nil - } - - // Handle Time and Duration stdlib types - switch t := i.(type) { - case *time.Time: - pt, err := time.Parse(time.RFC3339Nano, value) - if err != nil { - return fmt.Errorf("bad Timestamp: %v", err) - } - *t = pt - return nil - case *time.Duration: - d, err := time.ParseDuration(value) - if err != nil { - return fmt.Errorf("bad Duration: %v", err) - } - *t = d - return nil - } - - // is the destination field an enumeration type? 
- if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { - return populateFieldEnum(f, value, enumValMap) - } - - conv, ok := convFromType[f.Kind()] - if !ok { - return fmt.Errorf("field type %T is not supported in query parameters", i) - } - result := conv.Call([]reflect.Value{reflect.ValueOf(value)}) - if err := result[1].Interface(); err != nil { - return err.(error) - } - f.Set(result[0].Convert(f.Type())) - return nil -} - -func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) { - // see if it's an enumeration string - if enumVal, ok := enumValMap[value]; ok { - return reflect.ValueOf(enumVal).Convert(t), nil - } - - // check for an integer that matches an enumeration value - eVal, err := strconv.Atoi(value) - if err != nil { - return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) - } - for _, v := range enumValMap { - if v == int32(eVal) { - return reflect.ValueOf(eVal).Convert(t), nil - } - } - return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) -} - -func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error { - cval, err := convertEnum(value, f.Type(), enumValMap) - if err != nil { - return err - } - f.Set(cval) - return nil -} - -func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error { - elemType := f.Type().Elem() - f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) - for i, v := range values { - result, err := convertEnum(v, elemType, enumValMap) - if err != nil { - return err - } - f.Index(i).Set(result) - } - return nil -} - -var ( - convFromType = map[reflect.Kind]reflect.Value{ - reflect.String: reflect.ValueOf(String), - reflect.Bool: reflect.ValueOf(Bool), - reflect.Float64: reflect.ValueOf(Float64), - reflect.Float32: reflect.ValueOf(Float32), - reflect.Int64: reflect.ValueOf(Int64), - reflect.Int32: reflect.ValueOf(Int32), - reflect.Uint64: reflect.ValueOf(Uint64), - reflect.Uint32: reflect.ValueOf(Uint32), - reflect.Slice: reflect.ValueOf(Bytes), - } -) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE.txt similarity index 100% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE.txt diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel new file mode 100644 index 0000000000000..f694f3c0d035a --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel @@ -0,0 +1,35 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "httprule", + srcs = [ + "compile.go", + "parse.go", + "types.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule", + deps = ["//utilities"], +) + +go_test( + name = "httprule_test", + size = "small", + srcs = [ + "compile_test.go", + "parse_test.go", + "types_test.go", + ], + embed = [":httprule"], + deps = [ + "//utilities", + "@com_github_golang_glog//:glog", + ], +) + +alias( + name = "go_default_library", + actual = ":httprule", + visibility = ["//:__subpackages__"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go 
b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go new file mode 100644 index 0000000000000..3cd9372959d36 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go @@ -0,0 +1,121 @@ +package httprule + +import ( + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" +) + +const ( + opcodeVersion = 1 +) + +// Template is a compiled representation of path templates. +type Template struct { + // Version is the version number of the format. + Version int + // OpCodes is a sequence of operations. + OpCodes []int + // Pool is a constant pool + Pool []string + // Verb is a VERB part in the template. + Verb string + // Fields is a list of field paths bound in this template. + Fields []string + // Original template (example: /v1/a_bit_of_everything) + Template string +} + +// Compiler compiles utilities representation of path templates into marshallable operations. +// They can be unmarshalled by runtime.NewPattern. +type Compiler interface { + Compile() Template +} + +type op struct { + // code is the opcode of the operation + code utilities.OpCode + + // str is a string operand of the code. + // num is ignored if str is not empty. + str string + + // num is a numeric operand of the code. + num int +} + +func (w wildcard) compile() []op { + return []op{ + {code: utilities.OpPush}, + } +} + +func (w deepWildcard) compile() []op { + return []op{ + {code: utilities.OpPushM}, + } +} + +func (l literal) compile() []op { + return []op{ + { + code: utilities.OpLitPush, + str: string(l), + }, + } +} + +func (v variable) compile() []op { + var ops []op + for _, s := range v.segments { + ops = append(ops, s.compile()...) + } + ops = append(ops, op{ + code: utilities.OpConcatN, + num: len(v.segments), + }, op{ + code: utilities.OpCapture, + str: v.path, + }) + + return ops +} + +func (t template) Compile() Template { + var rawOps []op + for _, s := range t.segments { + rawOps = append(rawOps, s.compile()...) 
+ } + + var ( + ops []int + pool []string + fields []string + ) + consts := make(map[string]int) + for _, op := range rawOps { + ops = append(ops, int(op.code)) + if op.str == "" { + ops = append(ops, op.num) + } else { + // eof segment literal represents the "/" path pattern + if op.str == eof { + op.str = "" + } + if _, ok := consts[op.str]; !ok { + consts[op.str] = len(pool) + pool = append(pool, op.str) + } + ops = append(ops, consts[op.str]) + } + if op.code == utilities.OpCapture { + fields = append(fields, op.str) + } + } + return Template{ + Version: opcodeVersion, + OpCodes: ops, + Pool: pool, + Verb: t.verb, + Fields: fields, + Template: t.template, + } +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go new file mode 100644 index 0000000000000..138f7c12f0e36 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go @@ -0,0 +1,11 @@ +// +build gofuzz + +package httprule + +func Fuzz(data []byte) int { + _, err := Parse(string(data)) + if err != nil { + return 0 + } + return 0 +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go new file mode 100644 index 0000000000000..5edd784e62ade --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go @@ -0,0 +1,368 @@ +package httprule + +import ( + "fmt" + "strings" +) + +// InvalidTemplateError indicates that the path template is not valid. +type InvalidTemplateError struct { + tmpl string + msg string +} + +func (e InvalidTemplateError) Error() string { + return fmt.Sprintf("%s: %s", e.msg, e.tmpl) +} + +// Parse parses the string representation of path template +func Parse(tmpl string) (Compiler, error) { + if !strings.HasPrefix(tmpl, "/") { + return template{}, InvalidTemplateError{tmpl: tmpl, msg: "no leading /"} + } + tokens, verb := tokenize(tmpl[1:]) + + p := parser{tokens: tokens} + segs, err := p.topLevelSegments() + if err != nil { + return template{}, InvalidTemplateError{tmpl: tmpl, msg: err.Error()} + } + + return template{ + segments: segs, + verb: verb, + template: tmpl, + }, nil +} + +func tokenize(path string) (tokens []string, verb string) { + if path == "" { + return []string{eof}, "" + } + + const ( + init = iota + field + nested + ) + st := init + for path != "" { + var idx int + switch st { + case init: + idx = strings.IndexAny(path, "/{") + case field: + idx = strings.IndexAny(path, ".=}") + case nested: + idx = strings.IndexAny(path, "/}") + } + if idx < 0 { + tokens = append(tokens, path) + break + } + switch r := path[idx]; r { + case '/', '.': + case '{': + st = field + case '=': + st = nested + case '}': + st = init + } + if idx == 0 { + tokens = append(tokens, path[idx:idx+1]) + } else { + tokens = append(tokens, path[:idx], path[idx:idx+1]) + } + path = path[idx+1:] + } + + l := len(tokens) + // See + // https://github.com/grpc-ecosystem/grpc-gateway/pull/1947#issuecomment-774523693 ; + // although normal and backwards-compat logic here is to use the last index + // of a colon, if the final segment is a variable followed by a colon, the + // part following the colon must be a verb. Hence if the previous token is + // an end var marker, we switch the index we're looking for to Index instead + // of LastIndex, so that we correctly grab the remaining part of the path as + // the verb. 
+ var penultimateTokenIsEndVar bool + switch l { + case 0, 1: + // Not enough to be variable so skip this logic and don't result in an + // invalid index + default: + penultimateTokenIsEndVar = tokens[l-2] == "}" + } + t := tokens[l-1] + var idx int + if penultimateTokenIsEndVar { + idx = strings.Index(t, ":") + } else { + idx = strings.LastIndex(t, ":") + } + if idx == 0 { + tokens, verb = tokens[:l-1], t[1:] + } else if idx > 0 { + tokens[l-1], verb = t[:idx], t[idx+1:] + } + tokens = append(tokens, eof) + return tokens, verb +} + +// parser is a parser of the template syntax defined in github.com/googleapis/googleapis/google/api/http.proto. +type parser struct { + tokens []string + accepted []string +} + +// topLevelSegments is the target of this parser. +func (p *parser) topLevelSegments() ([]segment, error) { + if _, err := p.accept(typeEOF); err == nil { + p.tokens = p.tokens[:0] + return []segment{literal(eof)}, nil + } + segs, err := p.segments() + if err != nil { + return nil, err + } + if _, err := p.accept(typeEOF); err != nil { + return nil, fmt.Errorf("unexpected token %q after segments %q", p.tokens[0], strings.Join(p.accepted, "")) + } + return segs, nil +} + +func (p *parser) segments() ([]segment, error) { + s, err := p.segment() + if err != nil { + return nil, err + } + + segs := []segment{s} + for { + if _, err := p.accept("/"); err != nil { + return segs, nil + } + s, err := p.segment() + if err != nil { + return segs, err + } + segs = append(segs, s) + } +} + +func (p *parser) segment() (segment, error) { + if _, err := p.accept("*"); err == nil { + return wildcard{}, nil + } + if _, err := p.accept("**"); err == nil { + return deepWildcard{}, nil + } + if l, err := p.literal(); err == nil { + return l, nil + } + + v, err := p.variable() + if err != nil { + return nil, fmt.Errorf("segment neither wildcards, literal or variable: %v", err) + } + return v, err +} + +func (p *parser) literal() (segment, error) { + lit, err := p.accept(typeLiteral) + if err != nil { + return nil, err + } + return literal(lit), nil +} + +func (p *parser) variable() (segment, error) { + if _, err := p.accept("{"); err != nil { + return nil, err + } + + path, err := p.fieldPath() + if err != nil { + return nil, err + } + + var segs []segment + if _, err := p.accept("="); err == nil { + segs, err = p.segments() + if err != nil { + return nil, fmt.Errorf("invalid segment in variable %q: %v", path, err) + } + } else { + segs = []segment{wildcard{}} + } + + if _, err := p.accept("}"); err != nil { + return nil, fmt.Errorf("unterminated variable segment: %s", path) + } + return variable{ + path: path, + segments: segs, + }, nil +} + +func (p *parser) fieldPath() (string, error) { + c, err := p.accept(typeIdent) + if err != nil { + return "", err + } + components := []string{c} + for { + if _, err = p.accept("."); err != nil { + return strings.Join(components, "."), nil + } + c, err := p.accept(typeIdent) + if err != nil { + return "", fmt.Errorf("invalid field path component: %v", err) + } + components = append(components, c) + } +} + +// A termType is a type of terminal symbols. +type termType string + +// These constants define some of valid values of termType. +// They improve readability of parse functions. +// +// You can also use "/", "*", "**", "." or "=" as valid values. +const ( + typeIdent = termType("ident") + typeLiteral = termType("literal") + typeEOF = termType("$") +) + +const ( + // eof is the terminal symbol which always appears at the end of token sequence. 
+ eof = "\u0000" +) + +// accept tries to accept a token in "p". +// This function consumes a token and returns it if it matches to the specified "term". +// If it doesn't match, the function does not consume any tokens and return an error. +func (p *parser) accept(term termType) (string, error) { + t := p.tokens[0] + switch term { + case "/", "*", "**", ".", "=", "{", "}": + if t != string(term) && t != "/" { + return "", fmt.Errorf("expected %q but got %q", term, t) + } + case typeEOF: + if t != eof { + return "", fmt.Errorf("expected EOF but got %q", t) + } + case typeIdent: + if err := expectIdent(t); err != nil { + return "", err + } + case typeLiteral: + if err := expectPChars(t); err != nil { + return "", err + } + default: + return "", fmt.Errorf("unknown termType %q", term) + } + p.tokens = p.tokens[1:] + p.accepted = append(p.accepted, t) + return t, nil +} + +// expectPChars determines if "t" consists of only pchars defined in RFC3986. +// +// https://www.ietf.org/rfc/rfc3986.txt, P.49 +// pchar = unreserved / pct-encoded / sub-delims / ":" / "@" +// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" +// sub-delims = "!" / "$" / "&" / "'" / "(" / ")" +// / "*" / "+" / "," / ";" / "=" +// pct-encoded = "%" HEXDIG HEXDIG +func expectPChars(t string) error { + const ( + init = iota + pct1 + pct2 + ) + st := init + for _, r := range t { + if st != init { + if !isHexDigit(r) { + return fmt.Errorf("invalid hexdigit: %c(%U)", r, r) + } + switch st { + case pct1: + st = pct2 + case pct2: + st = init + } + continue + } + + // unreserved + switch { + case 'A' <= r && r <= 'Z': + continue + case 'a' <= r && r <= 'z': + continue + case '0' <= r && r <= '9': + continue + } + switch r { + case '-', '.', '_', '~': + // unreserved + case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': + // sub-delims + case ':', '@': + // rest of pchar + case '%': + // pct-encoded + st = pct1 + default: + return fmt.Errorf("invalid character in path segment: %q(%U)", r, r) + } + } + if st != init { + return fmt.Errorf("invalid percent-encoding in %q", t) + } + return nil +} + +// expectIdent determines if "ident" is a valid identifier in .proto schema ([[:alpha:]_][[:alphanum:]_]*). 
+func expectIdent(ident string) error { + if ident == "" { + return fmt.Errorf("empty identifier") + } + for pos, r := range ident { + switch { + case '0' <= r && r <= '9': + if pos == 0 { + return fmt.Errorf("identifier starting with digit: %s", ident) + } + continue + case 'A' <= r && r <= 'Z': + continue + case 'a' <= r && r <= 'z': + continue + case r == '_': + continue + default: + return fmt.Errorf("invalid character %q(%U) in identifier: %s", r, r, ident) + } + } + return nil +} + +func isHexDigit(r rune) bool { + switch { + case '0' <= r && r <= '9': + return true + case 'A' <= r && r <= 'F': + return true + case 'a' <= r && r <= 'f': + return true + } + return false +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go new file mode 100644 index 0000000000000..5a814a0004cec --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go @@ -0,0 +1,60 @@ +package httprule + +import ( + "fmt" + "strings" +) + +type template struct { + segments []segment + verb string + template string +} + +type segment interface { + fmt.Stringer + compile() (ops []op) +} + +type wildcard struct{} + +type deepWildcard struct{} + +type literal string + +type variable struct { + path string + segments []segment +} + +func (wildcard) String() string { + return "*" +} + +func (deepWildcard) String() string { + return "**" +} + +func (l literal) String() string { + return string(l) +} + +func (v variable) String() string { + var segs []string + for _, s := range v.segments { + segs = append(segs, s.String()) + } + return fmt.Sprintf("{%s=%s}", v.path, strings.Join(segs, "/")) +} + +func (t template) String() string { + var segs []string + for _, s := range t.segments { + segs = append(segs, s.String()) + } + str := strings.Join(segs, "/") + if t.verb != "" { + str = fmt.Sprintf("%s:%s", str, t.verb) + } + return "/" + str +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel new file mode 100644 index 0000000000000..95f867a5286cb --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel @@ -0,0 +1,91 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "runtime", + srcs = [ + "context.go", + "convert.go", + "doc.go", + "errors.go", + "fieldmask.go", + "handler.go", + "marshal_httpbodyproto.go", + "marshal_json.go", + "marshal_jsonpb.go", + "marshal_proto.go", + "marshaler.go", + "marshaler_registry.go", + "mux.go", + "pattern.go", + "proto2_convert.go", + "query.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/runtime", + deps = [ + "//internal/httprule", + "//utilities", + "@go_googleapis//google/api:httpbody_go_proto", + "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//grpclog", + "@org_golang_google_grpc//metadata", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//encoding/protojson", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//reflect/protoreflect", + "@org_golang_google_protobuf//reflect/protoregistry", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/timestamppb", + "@org_golang_google_protobuf//types/known/wrapperspb", + ], +) + +go_test( + 
name = "runtime_test", + size = "small", + srcs = [ + "context_test.go", + "convert_test.go", + "errors_test.go", + "fieldmask_test.go", + "handler_test.go", + "marshal_httpbodyproto_test.go", + "marshal_json_test.go", + "marshal_jsonpb_test.go", + "marshal_proto_test.go", + "marshaler_registry_test.go", + "mux_test.go", + "pattern_test.go", + "query_test.go", + ], + embed = [":runtime"], + deps = [ + "//runtime/internal/examplepb", + "//utilities", + "@com_github_google_go_cmp//cmp", + "@com_github_google_go_cmp//cmp/cmpopts", + "@go_googleapis//google/api:httpbody_go_proto", + "@go_googleapis//google/rpc:errdetails_go_proto", + "@go_googleapis//google/rpc:status_go_proto", + "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", + "@org_golang_google_grpc//codes", + "@org_golang_google_grpc//metadata", + "@org_golang_google_grpc//status", + "@org_golang_google_protobuf//encoding/protojson", + "@org_golang_google_protobuf//proto", + "@org_golang_google_protobuf//testing/protocmp", + "@org_golang_google_protobuf//types/known/durationpb", + "@org_golang_google_protobuf//types/known/emptypb", + "@org_golang_google_protobuf//types/known/structpb", + "@org_golang_google_protobuf//types/known/timestamppb", + "@org_golang_google_protobuf//types/known/wrapperspb", + ], +) + +alias( + name = "go_default_library", + actual = ":runtime", + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go similarity index 79% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go index d8cbd4cc96b03..fb57b9366eab1 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go @@ -41,6 +41,19 @@ var ( DefaultContextTimeout = 0 * time.Second ) +type ( + rpcMethodKey struct{} + httpPathPatternKey struct{} + + AnnotateContextOption func(ctx context.Context) context.Context +) + +func WithHTTPPathPattern(pattern string) AnnotateContextOption { + return func(ctx context.Context) context.Context { + return withHTTPPathPattern(ctx, pattern) + } +} + func decodeBinHeader(v string) ([]byte, error) { if len(v)%4 == 0 { // Input was padded, or padding was not necessary. @@ -56,8 +69,8 @@ At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For", except that the forwarded destination is not another HTTP service but rather a gRPC service. */ -func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) { - ctx, md, err := annotateContext(ctx, mux, req) +func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) { + ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...) if err != nil { return nil, err } @@ -70,8 +83,8 @@ func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (con // AnnotateIncomingContext adds context information such as metadata from the request. // Attach metadata as incoming context. 
-func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) { - ctx, md, err := annotateContext(ctx, mux, req) +func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) { + ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...) if err != nil { return nil, err } @@ -82,7 +95,11 @@ func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Reque return metadata.NewIncomingContext(ctx, md), nil } -func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, metadata.MD, error) { +func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, metadata.MD, error) { + ctx = withRPCMethod(ctx, rpcMethodName) + for _, o := range options { + ctx = o(ctx) + } var pairs []string timeout := DefaultContextTimeout if tm := req.Header.Get(metadataGrpcTimeout); tm != "" { @@ -132,6 +149,7 @@ func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (con } if timeout != 0 { + //nolint:govet // The context outlives this function ctx, _ = context.WithTimeout(ctx, timeout) } if len(pairs) == 0 { @@ -289,3 +307,39 @@ func isPermanentHTTPHeader(hdr string) bool { } return false } + +// RPCMethod returns the method string for the server context. The returned +// string is in the format of "/package.service/method". +func RPCMethod(ctx context.Context) (string, bool) { + m := ctx.Value(rpcMethodKey{}) + if m == nil { + return "", false + } + ms, ok := m.(string) + if !ok { + return "", false + } + return ms, true +} + +func withRPCMethod(ctx context.Context, rpcMethodName string) context.Context { + return context.WithValue(ctx, rpcMethodKey{}, rpcMethodName) +} + +// HTTPPathPattern returns the HTTP path pattern string relating to the HTTP handler, if one exists. +// The format of the returned string is defined by the google.api.http path template type. +func HTTPPathPattern(ctx context.Context) (string, bool) { + m := ctx.Value(httpPathPatternKey{}) + if m == nil { + return "", false + } + ms, ok := m.(string) + if !ok { + return "", false + } + return ms, true +} + +func withHTTPPathPattern(ctx context.Context, httpPathPattern string) context.Context { + return context.WithValue(ctx, httpPathPatternKey{}, httpPathPattern) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go similarity index 80% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go index 2c279344dc414..e6bc4e6ceece1 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go @@ -6,10 +6,10 @@ import ( "strconv" "strings" - "github.com/golang/protobuf/jsonpb" - "github.com/golang/protobuf/ptypes/duration" - "github.com/golang/protobuf/ptypes/timestamp" - "github.com/golang/protobuf/ptypes/wrappers" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" + "google.golang.org/protobuf/types/known/wrapperspb" ) // String just returns the given string. 
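One intended consumer of the RPCMethod and HTTPPathPattern accessors added above is a WithMetadata annotator, which can forward the selected method and matched template to the backend. A minimal sketch; the metadata keys are arbitrary.

package example

import (
    "context"
    "net/http"

    "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
    "google.golang.org/grpc/metadata"
)

func newGatewayMux() *runtime.ServeMux {
    return runtime.NewServeMux(
        runtime.WithMetadata(func(ctx context.Context, _ *http.Request) metadata.MD {
            md := metadata.MD{}
            if method, ok := runtime.RPCMethod(ctx); ok {
                md.Set("x-gateway-method", method) // e.g. "/pkg.Service/Method"
            }
            if pattern, ok := runtime.HTTPPathPattern(ctx); ok {
                md.Set("x-gateway-pattern", pattern) // e.g. "/v1/{name=shelves/*}"
            }
            return md
        }),
    )
}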
@@ -205,9 +205,11 @@ func BytesSlice(val, sep string) ([][]byte, error) { } // Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp. -func Timestamp(val string) (*timestamp.Timestamp, error) { - var r timestamp.Timestamp - err := jsonpb.UnmarshalString(val, &r) +func Timestamp(val string) (*timestamppb.Timestamp, error) { + var r timestamppb.Timestamp + val = strconv.Quote(strings.Trim(val, `"`)) + unmarshaler := &protojson.UnmarshalOptions{} + err := unmarshaler.Unmarshal([]byte(val), &r) if err != nil { return nil, err } @@ -215,9 +217,11 @@ func Timestamp(val string) (*timestamp.Timestamp, error) { } // Duration converts the given string into a timestamp.Duration. -func Duration(val string) (*duration.Duration, error) { - var r duration.Duration - err := jsonpb.UnmarshalString(val, &r) +func Duration(val string) (*durationpb.Duration, error) { + var r durationpb.Duration + val = strconv.Quote(strings.Trim(val, `"`)) + unmarshaler := &protojson.UnmarshalOptions{} + err := unmarshaler.Unmarshal([]byte(val), &r) if err != nil { return nil, err } @@ -265,54 +269,54 @@ func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) { */ // StringValue well-known type support as wrapper around string type -func StringValue(val string) (*wrappers.StringValue, error) { - return &wrappers.StringValue{Value: val}, nil +func StringValue(val string) (*wrapperspb.StringValue, error) { + return &wrapperspb.StringValue{Value: val}, nil } // FloatValue well-known type support as wrapper around float32 type -func FloatValue(val string) (*wrappers.FloatValue, error) { +func FloatValue(val string) (*wrapperspb.FloatValue, error) { parsedVal, err := Float32(val) - return &wrappers.FloatValue{Value: parsedVal}, err + return &wrapperspb.FloatValue{Value: parsedVal}, err } // DoubleValue well-known type support as wrapper around float64 type -func DoubleValue(val string) (*wrappers.DoubleValue, error) { +func DoubleValue(val string) (*wrapperspb.DoubleValue, error) { parsedVal, err := Float64(val) - return &wrappers.DoubleValue{Value: parsedVal}, err + return &wrapperspb.DoubleValue{Value: parsedVal}, err } // BoolValue well-known type support as wrapper around bool type -func BoolValue(val string) (*wrappers.BoolValue, error) { +func BoolValue(val string) (*wrapperspb.BoolValue, error) { parsedVal, err := Bool(val) - return &wrappers.BoolValue{Value: parsedVal}, err + return &wrapperspb.BoolValue{Value: parsedVal}, err } // Int32Value well-known type support as wrapper around int32 type -func Int32Value(val string) (*wrappers.Int32Value, error) { +func Int32Value(val string) (*wrapperspb.Int32Value, error) { parsedVal, err := Int32(val) - return &wrappers.Int32Value{Value: parsedVal}, err + return &wrapperspb.Int32Value{Value: parsedVal}, err } // UInt32Value well-known type support as wrapper around uint32 type -func UInt32Value(val string) (*wrappers.UInt32Value, error) { +func UInt32Value(val string) (*wrapperspb.UInt32Value, error) { parsedVal, err := Uint32(val) - return &wrappers.UInt32Value{Value: parsedVal}, err + return &wrapperspb.UInt32Value{Value: parsedVal}, err } // Int64Value well-known type support as wrapper around int64 type -func Int64Value(val string) (*wrappers.Int64Value, error) { +func Int64Value(val string) (*wrapperspb.Int64Value, error) { parsedVal, err := Int64(val) - return &wrappers.Int64Value{Value: parsedVal}, err + return &wrapperspb.Int64Value{Value: parsedVal}, err } // UInt64Value well-known type support as wrapper around uint64 
type -func UInt64Value(val string) (*wrappers.UInt64Value, error) { +func UInt64Value(val string) (*wrapperspb.UInt64Value, error) { parsedVal, err := Uint64(val) - return &wrappers.UInt64Value{Value: parsedVal}, err + return &wrapperspb.UInt64Value{Value: parsedVal}, err } // BytesValue well-known type support as wrapper around bytes[] type -func BytesValue(val string) (*wrappers.BytesValue, error) { +func BytesValue(val string) (*wrapperspb.BytesValue, error) { parsedVal, err := Bytes(val) - return &wrappers.BytesValue{Value: parsedVal}, err + return &wrapperspb.BytesValue{Value: parsedVal}, err } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go similarity index 100% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go new file mode 100644 index 0000000000000..d9e0013c43927 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go @@ -0,0 +1,180 @@ +package runtime + +import ( + "context" + "errors" + "io" + "net/http" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// ErrorHandlerFunc is the signature used to configure error handling. +type ErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error) + +// StreamErrorHandlerFunc is the signature used to configure stream error handling. +type StreamErrorHandlerFunc func(context.Context, error) *status.Status + +// RoutingErrorHandlerFunc is the signature used to configure error handling for routing errors. +type RoutingErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, int) + +// HTTPStatusError is the error to use when needing to provide a different HTTP status code for an error +// passed to the DefaultRoutingErrorHandler. +type HTTPStatusError struct { + HTTPStatus int + Err error +} + +func (e *HTTPStatusError) Error() string { + return e.Err.Error() +} + +// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status. +// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto +func HTTPStatusFromCode(code codes.Code) int { + switch code { + case codes.OK: + return http.StatusOK + case codes.Canceled: + return http.StatusRequestTimeout + case codes.Unknown: + return http.StatusInternalServerError + case codes.InvalidArgument: + return http.StatusBadRequest + case codes.DeadlineExceeded: + return http.StatusGatewayTimeout + case codes.NotFound: + return http.StatusNotFound + case codes.AlreadyExists: + return http.StatusConflict + case codes.PermissionDenied: + return http.StatusForbidden + case codes.Unauthenticated: + return http.StatusUnauthorized + case codes.ResourceExhausted: + return http.StatusTooManyRequests + case codes.FailedPrecondition: + // Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status. 
+ return http.StatusBadRequest + case codes.Aborted: + return http.StatusConflict + case codes.OutOfRange: + return http.StatusBadRequest + case codes.Unimplemented: + return http.StatusNotImplemented + case codes.Internal: + return http.StatusInternalServerError + case codes.Unavailable: + return http.StatusServiceUnavailable + case codes.DataLoss: + return http.StatusInternalServerError + } + + grpclog.Infof("Unknown gRPC error code: %v", code) + return http.StatusInternalServerError +} + +// HTTPError uses the mux-configured error handler. +func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { + mux.errorHandler(ctx, mux, marshaler, w, r, err) +} + +// DefaultHTTPErrorHandler is the default error handler. +// If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode. +// If "err" is a HTTPStatusError, the function replies with the status code provide by that struct. This is +// intended to allow passing through of specific statuses via the function set via WithRoutingErrorHandler +// for the ServeMux constructor to handle edge cases which the standard mappings in HTTPStatusFromCode +// are insufficient for. +// If otherwise, it replies with http.StatusInternalServerError. +// +// The response body written by this function is a Status message marshaled by the Marshaler. +func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { + // return Internal when Marshal failed + const fallback = `{"code": 13, "message": "failed to marshal error message"}` + + var customStatus *HTTPStatusError + if errors.As(err, &customStatus) { + err = customStatus.Err + } + + s := status.Convert(err) + pb := s.Proto() + + w.Header().Del("Trailer") + w.Header().Del("Transfer-Encoding") + + contentType := marshaler.ContentType(pb) + w.Header().Set("Content-Type", contentType) + + if s.Code() == codes.Unauthenticated { + w.Header().Set("WWW-Authenticate", s.Message()) + } + + buf, merr := marshaler.Marshal(pb) + if merr != nil { + grpclog.Infof("Failed to marshal error message %q: %v", s, merr) + w.WriteHeader(http.StatusInternalServerError) + if _, err := io.WriteString(w, fallback); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + return + } + + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Infof("Failed to extract ServerMetadata from context") + } + + handleForwardResponseServerMetadata(w, mux, md) + + // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2 + // Unless the request includes a TE header field indicating "trailers" + // is acceptable, as described in Section 4.3, a server SHOULD NOT + // generate trailer fields that it believes are necessary for the user + // agent to receive. + doForwardTrailers := requestAcceptsTrailers(r) + + if doForwardTrailers { + handleForwardResponseTrailerHeader(w, md) + w.Header().Set("Transfer-Encoding", "chunked") + } + + st := HTTPStatusFromCode(s.Code()) + if customStatus != nil { + st = customStatus.HTTPStatus + } + + w.WriteHeader(st) + if _, err := w.Write(buf); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + + if doForwardTrailers { + handleForwardResponseTrailer(w, md) + } +} + +func DefaultStreamErrorHandler(_ context.Context, err error) *status.Status { + return status.Convert(err) +} + +// DefaultRoutingErrorHandler is our default handler for routing errors. 
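A custom handler, registered with WithErrorHandler (which replaces WithProtoErrorHandler further down in mux.go), will usually wrap DefaultHTTPErrorHandler rather than reimplement it. A hedged sketch; the response header is purely illustrative.

package example

import (
    "context"
    "net/http"

    "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
    "google.golang.org/grpc/status"
)

// errorHandler tags each error response before delegating to the default
// gRPC-status-to-HTTP translation.
func errorHandler(ctx context.Context, mux *runtime.ServeMux, m runtime.Marshaler,
    w http.ResponseWriter, r *http.Request, err error) {
    w.Header().Set("X-Error-Code", status.Code(err).String()) // illustrative header
    runtime.DefaultHTTPErrorHandler(ctx, mux, m, w, r, err)
}

func newMux() *runtime.ServeMux {
    return runtime.NewServeMux(runtime.WithErrorHandler(errorHandler))
}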
+// By default http error codes mapped on the following error codes: +// NotFound -> grpc.NotFound +// StatusBadRequest -> grpc.InvalidArgument +// MethodNotAllowed -> grpc.Unimplemented +// Other -> grpc.Internal, method is not expecting to be called for anything else +func DefaultRoutingErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, httpStatus int) { + sterr := status.Error(codes.Internal, "Unexpected routing error") + switch httpStatus { + case http.StatusBadRequest: + sterr = status.Error(codes.InvalidArgument, http.StatusText(httpStatus)) + case http.StatusMethodNotAllowed: + sterr = status.Error(codes.Unimplemented, http.StatusText(httpStatus)) + case http.StatusNotFound: + sterr = status.Error(codes.NotFound, http.StatusText(httpStatus)) + } + mux.errorHandler(ctx, mux, marshaler, w, r, sterr) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go new file mode 100644 index 0000000000000..0138ed2f769f7 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go @@ -0,0 +1,165 @@ +package runtime + +import ( + "encoding/json" + "fmt" + "io" + "sort" + + "google.golang.org/genproto/protobuf/field_mask" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" +) + +func getFieldByName(fields protoreflect.FieldDescriptors, name string) protoreflect.FieldDescriptor { + fd := fields.ByName(protoreflect.Name(name)) + if fd != nil { + return fd + } + + return fields.ByJSONName(name) +} + +// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body. +func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.FieldMask, error) { + fm := &field_mask.FieldMask{} + var root interface{} + + if err := json.NewDecoder(r).Decode(&root); err != nil { + if err == io.EOF { + return fm, nil + } + return nil, err + } + + queue := []fieldMaskPathItem{{node: root, msg: msg.ProtoReflect()}} + for len(queue) > 0 { + // dequeue an item + item := queue[0] + queue = queue[1:] + + m, ok := item.node.(map[string]interface{}) + switch { + case ok: + // if the item is an object, then enqueue all of its children + for k, v := range m { + if item.msg == nil { + return nil, fmt.Errorf("JSON structure did not match request type") + } + + fd := getFieldByName(item.msg.Descriptor().Fields(), k) + if fd == nil { + return nil, fmt.Errorf("could not find field %q in %q", k, item.msg.Descriptor().FullName()) + } + + if isDynamicProtoMessage(fd.Message()) { + for _, p := range buildPathsBlindly(k, v) { + newPath := p + if item.path != "" { + newPath = item.path + "." + newPath + } + queue = append(queue, fieldMaskPathItem{path: newPath}) + } + continue + } + + if isProtobufAnyMessage(fd.Message()) { + _, hasTypeField := v.(map[string]interface{})["@type"] + if hasTypeField { + queue = append(queue, fieldMaskPathItem{path: k}) + continue + } else { + return nil, fmt.Errorf("could not find field @type in %q in message %q", k, item.msg.Descriptor().FullName()) + } + + } + + child := fieldMaskPathItem{ + node: v, + } + if item.path == "" { + child.path = string(fd.FullName().Name()) + } else { + child.path = item.path + "." + string(fd.FullName().Name()) + } + + switch { + case fd.IsList(), fd.IsMap(): + // As per: https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/field_mask.proto#L85-L86 + // Do not recurse into repeated fields. 
The repeated field goes on the end of the path and we stop. + fm.Paths = append(fm.Paths, child.path) + case fd.Message() != nil: + child.msg = item.msg.Get(fd).Message() + fallthrough + default: + queue = append(queue, child) + } + } + case len(item.path) > 0: + // otherwise, it's a leaf node so print its path + fm.Paths = append(fm.Paths, item.path) + } + } + + // Sort for deterministic output in the presence + // of repeated fields. + sort.Strings(fm.Paths) + + return fm, nil +} + +func isProtobufAnyMessage(md protoreflect.MessageDescriptor) bool { + return md != nil && (md.FullName() == "google.protobuf.Any") +} + +func isDynamicProtoMessage(md protoreflect.MessageDescriptor) bool { + return md != nil && (md.FullName() == "google.protobuf.Struct" || md.FullName() == "google.protobuf.Value") +} + +// buildPathsBlindly does not attempt to match proto field names to the +// json value keys. Instead it relies completely on the structure of +// the unmarshalled json contained within in. +// Returns a slice containing all subpaths with the root at the +// passed in name and json value. +func buildPathsBlindly(name string, in interface{}) []string { + m, ok := in.(map[string]interface{}) + if !ok { + return []string{name} + } + + var paths []string + queue := []fieldMaskPathItem{{path: name, node: m}} + for len(queue) > 0 { + cur := queue[0] + queue = queue[1:] + + m, ok := cur.node.(map[string]interface{}) + if !ok { + // This should never happen since we should always check that we only add + // nodes of type map[string]interface{} to the queue. + continue + } + for k, v := range m { + if mi, ok := v.(map[string]interface{}); ok { + queue = append(queue, fieldMaskPathItem{path: cur.path + "." + k, node: mi}) + } else { + // This is not a struct, so there are no more levels to descend. + curPath := cur.path + "." + k + paths = append(paths, curPath) + } + } + } + return paths +} + +// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask +type fieldMaskPathItem struct { + // the list of prior fields leading up to node connected by dots + path string + + // a generic decoded json object the current item to inspect for further path extraction + node interface{} + + // parent message + msg protoreflect.Message +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go similarity index 77% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go index e6e8f286e1294..d1e21df4810a0 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go @@ -2,19 +2,19 @@ package runtime import ( "context" - "errors" "fmt" "io" "net/http" "net/textproto" + "strings" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/internal" + "google.golang.org/genproto/googleapis/api/httpbody" + "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" ) -var errEmptyResponse = errors.New("empty response") - // ForwardResponseStream forwards the stream from gRPC server to REST client. 
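Tying off FieldMaskFromRequestBody above: it derives update paths from whatever JSON body the client sent. The sketch below uses a well-known wrapper type purely so it compiles on its own; a real caller would pass the request message of the RPC being patched.

package main

import (
    "fmt"
    "strings"

    "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
    "google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
    body := strings.NewReader(`{"value": "new title"}`)
    fm, err := runtime.FieldMaskFromRequestBody(body, &wrapperspb.StringValue{})
    if err != nil {
        panic(err)
    }
    fmt.Println(fm.Paths) // [value]
}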
func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { f, ok := w.(http.Flusher) @@ -33,7 +33,6 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal handleForwardResponseServerMetadata(w, mux, md) w.Header().Set("Transfer-Encoding", "chunked") - w.Header().Set("Content-Type", marshaler.ContentType()) if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil { HTTPError(ctx, mux, marshaler, w, req, err) return @@ -61,10 +60,17 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal return } + if !wroteHeader { + w.Header().Set("Content-Type", marshaler.ContentType(resp)) + } + var buf []byte + httpBody, isHTTPBody := resp.(*httpbody.HttpBody) switch { case resp == nil: - buf, err = marshaler.Marshal(errorChunk(streamError(ctx, mux.streamErrorHandler, errEmptyResponse))) + buf, err = marshaler.Marshal(errorChunk(status.New(codes.Internal, "empty response"))) + case isHTTPBody: + buf = httpBody.GetData() default: result := map[string]interface{}{"result": resp} if rb, ok := resp.(responseBody); ok { @@ -132,15 +138,22 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha } handleForwardResponseServerMetadata(w, mux, md) - handleForwardResponseTrailerHeader(w, md) - contentType := marshaler.ContentType() - // Check marshaler on run time in order to keep backwards compatibility - // An interface param needs to be added to the ContentType() function on - // the Marshal interface to be able to remove this check - if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok { - contentType = typeMarshaler.ContentTypeFromMessage(resp) + // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2 + // Unless the request includes a TE header field indicating "trailers" + // is acceptable, as described in Section 4.3, a server SHOULD NOT + // generate trailer fields that it believes are necessary for the user + // agent to receive. 
+ doForwardTrailers := requestAcceptsTrailers(req) + + if doForwardTrailers { + handleForwardResponseTrailerHeader(w, md) + w.Header().Set("Transfer-Encoding", "chunked") } + + handleForwardResponseTrailerHeader(w, md) + + contentType := marshaler.ContentType(resp) w.Header().Set("Content-Type", contentType) if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { @@ -164,7 +177,14 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha grpclog.Infof("Failed to write response: %v", err) } - handleForwardResponseTrailer(w, md) + if doForwardTrailers { + handleForwardResponseTrailer(w, md) + } +} + +func requestAcceptsTrailers(req *http.Request) bool { + te := req.Header.Get("TE") + return strings.Contains(strings.ToLower(te), "trailers") } func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error { @@ -181,11 +201,13 @@ func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, re } func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) { - serr := streamError(ctx, mux.streamErrorHandler, err) + st := mux.streamErrorHandler(ctx, err) + msg := errorChunk(st) if !wroteHeader { - w.WriteHeader(int(serr.HttpCode)) + w.Header().Set("Content-Type", marshaler.ContentType(msg)) + w.WriteHeader(HTTPStatusFromCode(st.Code())) } - buf, merr := marshaler.Marshal(errorChunk(serr)) + buf, merr := marshaler.Marshal(msg) if merr != nil { grpclog.Infof("Failed to marshal an error: %v", merr) return @@ -196,17 +218,6 @@ func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, mar } } -// streamError returns the payload for the final message in a response stream -// that represents the given err. -func streamError(ctx context.Context, errHandler StreamErrorHandlerFunc, err error) *StreamError { - serr := errHandler(ctx, err) - if serr != nil { - return serr - } - // TODO: log about misbehaving stream error handler? - return DefaultHTTPStreamErrorHandler(ctx, err) -} - -func errorChunk(err *StreamError) map[string]proto.Message { - return map[string]proto.Message{"error": (*internal.StreamError)(err)} +func errorChunk(st *status.Status) map[string]proto.Message { + return map[string]proto.Message{"error": st.Proto()} } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go similarity index 54% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go index 525b0338c7471..b86135c889b9f 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go @@ -4,13 +4,6 @@ import ( "google.golang.org/genproto/googleapis/api/httpbody" ) -// SetHTTPBodyMarshaler overwrite the default marshaler with the HTTPBodyMarshaler -func SetHTTPBodyMarshaler(serveMux *ServeMux) { - serveMux.marshalers.mimeMap[MIMEWildcard] = &HTTPBodyMarshaler{ - Marshaler: &JSONPb{OrigName: true}, - } -} - // HTTPBodyMarshaler is a Marshaler which supports marshaling of a // google.api.HttpBody message as the full response body if it is // the actual message used as the response. 
If not, then this will @@ -19,18 +12,14 @@ type HTTPBodyMarshaler struct { Marshaler } -// ContentType implementation to keep backwards compatibility with marshal interface -func (h *HTTPBodyMarshaler) ContentType() string { - return h.ContentTypeFromMessage(nil) -} - -// ContentTypeFromMessage in case v is a google.api.HttpBody message it returns -// its specified content type otherwise fall back to the default Marshaler. -func (h *HTTPBodyMarshaler) ContentTypeFromMessage(v interface{}) string { +// ContentType returns its specified content type in case v is a +// google.api.HttpBody message, otherwise it will fall back to the default Marshalers +// content type. +func (h *HTTPBodyMarshaler) ContentType(v interface{}) string { if httpBody, ok := v.(*httpbody.HttpBody); ok { return httpBody.GetContentType() } - return h.Marshaler.ContentType() + return h.Marshaler.ContentType(v) } // Marshal marshals "v" by returning the body bytes if v is a diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go similarity index 95% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go index f9d3a585a4c05..d6aa82578369a 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go @@ -15,7 +15,7 @@ import ( type JSONBuiltin struct{} // ContentType always Returns "application/json". -func (*JSONBuiltin) ContentType() string { +func (*JSONBuiltin) ContentType(_ interface{}) string { return "application/json" } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go similarity index 58% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go index f0de351b212dd..7387c8e397690 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go @@ -6,21 +6,25 @@ import ( "fmt" "io" "reflect" + "strconv" - "github.com/golang/protobuf/jsonpb" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" ) // JSONPb is a Marshaler which marshals/unmarshals into/from JSON -// with the "github.com/golang/protobuf/jsonpb". -// It supports fully functionality of protobuf unlike JSONBuiltin. +// with the "google.golang.org/protobuf/encoding/protojson" marshaler. +// It supports the full functionality of protobuf unlike JSONBuiltin. // // The NewDecoder method returns a DecoderWrapper, so the underlying // *json.Decoder methods can be used. -type JSONPb jsonpb.Marshaler +type JSONPb struct { + protojson.MarshalOptions + protojson.UnmarshalOptions +} // ContentType always returns "application/json". 
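Because JSONPb now embeds the protojson option structs, the removed OrigName/EmitDefaults knobs are expressed through those options instead. A typical v2 registration looks roughly like this, assuming WithMarshalerOption from the marshaler registry (not shown in this hunk).

package example

import (
    "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
    "google.golang.org/protobuf/encoding/protojson"
)

// newJSONMux registers a JSONPb marshaler configured via protojson options.
func newJSONMux() *runtime.ServeMux {
    return runtime.NewServeMux(
        runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.JSONPb{
            MarshalOptions: protojson.MarshalOptions{
                UseProtoNames:   true, // keep proto field names (was OrigName)
                EmitUnpopulated: true, // keep zero-valued fields (was EmitDefaults)
            },
            UnmarshalOptions: protojson.UnmarshalOptions{
                DiscardUnknown: true, // replaces the package-level allowUnknownFields toggle
            },
        }),
    )
}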
-func (*JSONPb) ContentType() string { +func (*JSONPb) ContentType(_ interface{}) string { return "application/json" } @@ -47,7 +51,13 @@ func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error { _, err = w.Write(buf) return err } - return (*jsonpb.Marshaler)(j).Marshal(w, p) + b, err := j.MarshalOptions.Marshal(p) + if err != nil { + return err + } + + _, err = w.Write(b) + return err } var ( @@ -56,8 +66,8 @@ var ( ) // marshalNonProto marshals a non-message field of a protobuf message. -// This function does not correctly marshals arbitrary data structure into JSON, -// but it is only capable of marshaling non-message field values of protobuf, +// This function does not correctly marshal arbitrary data structures into JSON, +// it is only capable of marshaling non-message field values of protobuf, // i.e. primitive types, enums; pointers to primitives or enums; maps from // integer/string types to primitives/enums/pointers to messages. func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { @@ -74,7 +84,7 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { if rv.Kind() == reflect.Slice { if rv.IsNil() { - if j.EmitDefaults { + if j.EmitUnpopulated { return []byte("[]"), nil } return []byte("null"), nil @@ -93,7 +103,37 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { return nil, err } } - if err = (*jsonpb.Marshaler)(j).Marshal(&buf, rv.Index(i).Interface().(proto.Message)); err != nil { + if err = j.marshalTo(&buf, rv.Index(i).Interface().(proto.Message)); err != nil { + return nil, err + } + } + err = buf.WriteByte(']') + if err != nil { + return nil, err + } + + return buf.Bytes(), nil + } + + if rv.Type().Elem().Implements(typeProtoEnum) { + var buf bytes.Buffer + err := buf.WriteByte('[') + if err != nil { + return nil, err + } + for i := 0; i < rv.Len(); i++ { + if i != 0 { + err = buf.WriteByte(',') + if err != nil { + return nil, err + } + } + if j.UseEnumNumbers { + _, err = buf.WriteString(strconv.FormatInt(rv.Index(i).Int(), 10)) + } else { + _, err = buf.WriteString("\"" + rv.Index(i).Interface().(protoEnum).String() + "\"") + } + if err != nil { return nil, err } } @@ -120,7 +160,7 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { } return json.Marshal(m) } - if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts { + if enum, ok := rv.Interface().(protoEnum); ok && !j.UseEnumNumbers { return json.Marshal(enum.String()) } return json.Marshal(rv.Interface()) @@ -128,25 +168,29 @@ func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { // Unmarshal unmarshals JSON "data" into "v" func (j *JSONPb) Unmarshal(data []byte, v interface{}) error { - return unmarshalJSONPb(data, v) + return unmarshalJSONPb(data, j.UnmarshalOptions, v) } // NewDecoder returns a Decoder which reads JSON stream from "r". func (j *JSONPb) NewDecoder(r io.Reader) Decoder { d := json.NewDecoder(r) - return DecoderWrapper{Decoder: d} + return DecoderWrapper{ + Decoder: d, + UnmarshalOptions: j.UnmarshalOptions, + } } // DecoderWrapper is a wrapper around a *json.Decoder that adds // support for protos to the Decode method. type DecoderWrapper struct { *json.Decoder + protojson.UnmarshalOptions } // Decode wraps the embedded decoder's Decode method to support // protos using a jsonpb.Unmarshaler. 
func (d DecoderWrapper) Decode(v interface{}) error { - return decodeJSONPb(d.Decoder, v) + return decodeJSONPb(d.Decoder, d.UnmarshalOptions, v) } // NewEncoder returns an Encoder which writes JSON stream into "w". @@ -162,21 +206,28 @@ func (j *JSONPb) NewEncoder(w io.Writer) Encoder { }) } -func unmarshalJSONPb(data []byte, v interface{}) error { +func unmarshalJSONPb(data []byte, unmarshaler protojson.UnmarshalOptions, v interface{}) error { d := json.NewDecoder(bytes.NewReader(data)) - return decodeJSONPb(d, v) + return decodeJSONPb(d, unmarshaler, v) } -func decodeJSONPb(d *json.Decoder, v interface{}) error { +func decodeJSONPb(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error { p, ok := v.(proto.Message) if !ok { - return decodeNonProtoField(d, v) + return decodeNonProtoField(d, unmarshaler, v) } - unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields} - return unmarshaler.UnmarshalNext(d, p) + + // Decode into bytes for marshalling + var b json.RawMessage + err := d.Decode(&b) + if err != nil { + return err + } + + return unmarshaler.Unmarshal([]byte(b), p) } -func decodeNonProtoField(d *json.Decoder, v interface{}) error { +func decodeNonProtoField(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error { rv := reflect.ValueOf(v) if rv.Kind() != reflect.Ptr { return fmt.Errorf("%T is not a pointer", v) @@ -186,8 +237,14 @@ func decodeNonProtoField(d *json.Decoder, v interface{}) error { rv.Set(reflect.New(rv.Type().Elem())) } if rv.Type().ConvertibleTo(typeProtoMessage) { - unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields} - return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message)) + // Decode into bytes for marshalling + var b json.RawMessage + err := d.Decode(&b) + if err != nil { + return err + } + + return unmarshaler.Unmarshal([]byte(b), rv.Interface().(proto.Message)) } rv = rv.Elem() } @@ -211,24 +268,45 @@ func decodeNonProtoField(d *json.Decoder, v interface{}) error { } bk := result[0] bv := reflect.New(rv.Type().Elem()) - if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil { + if v == nil { + null := json.RawMessage("null") + v = &null + } + if err := unmarshalJSONPb([]byte(*v), unmarshaler, bv.Interface()); err != nil { return err } rv.SetMapIndex(bk, bv.Elem()) } return nil } + if rv.Kind() == reflect.Slice { + var sl []json.RawMessage + if err := d.Decode(&sl); err != nil { + return err + } + if sl != nil { + rv.Set(reflect.MakeSlice(rv.Type(), 0, 0)) + } + for _, item := range sl { + bv := reflect.New(rv.Type().Elem()) + if err := unmarshalJSONPb([]byte(item), unmarshaler, bv.Interface()); err != nil { + return err + } + rv.Set(reflect.Append(rv, bv.Elem())) + } + return nil + } if _, ok := rv.Interface().(protoEnum); ok { var repr interface{} if err := d.Decode(&repr); err != nil { return err } - switch repr.(type) { + switch v := repr.(type) { case string: // TODO(yugui) Should use proto.StructProperties? 
return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface()) case float64: - rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type())) + rv.Set(reflect.ValueOf(int32(v)).Convert(rv.Type())) return nil default: return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface()) @@ -242,6 +320,8 @@ type protoEnum interface { EnumDescriptor() ([]byte, []int) } +var typeProtoEnum = reflect.TypeOf((*protoEnum)(nil)).Elem() + var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem() // Delimiter for newline encoded JSON streams. @@ -249,14 +329,16 @@ func (j *JSONPb) Delimiter() []byte { return []byte("\n") } -// allowUnknownFields helps not to return an error when the destination -// is a struct and the input contains object keys which do not match any -// non-ignored, exported fields in the destination. -var allowUnknownFields = true - -// DisallowUnknownFields enables option in decoder (unmarshaller) to -// return an error when it finds an unknown field. This function must be -// called before using the JSON marshaller. -func DisallowUnknownFields() { - allowUnknownFields = false -} +var ( + convFromType = map[reflect.Kind]reflect.Value{ + reflect.String: reflect.ValueOf(String), + reflect.Bool: reflect.ValueOf(Bool), + reflect.Float64: reflect.ValueOf(Float64), + reflect.Float32: reflect.ValueOf(Float32), + reflect.Int64: reflect.ValueOf(Int64), + reflect.Int32: reflect.ValueOf(Int32), + reflect.Uint64: reflect.ValueOf(Uint64), + reflect.Uint32: reflect.ValueOf(Uint32), + reflect.Slice: reflect.ValueOf(Bytes), + } +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go similarity index 93% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go index f65d1a2676b87..007f8f1a2c7f1 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go @@ -4,15 +4,16 @@ import ( "io" "errors" - "github.com/golang/protobuf/proto" "io/ioutil" + + "google.golang.org/protobuf/proto" ) // ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialize proto bytes type ProtoMarshaller struct{} // ContentType always returns "application/octet-stream". -func (*ProtoMarshaller) ContentType() string { +func (*ProtoMarshaller) ContentType(_ interface{}) string { return "application/octet-stream" } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go similarity index 80% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go index 46153294217fd..2c0d25ff4935a 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go @@ -16,14 +16,9 @@ type Marshaler interface { // NewEncoder returns an Encoder which writes bytes sequence into "w". NewEncoder(w io.Writer) Encoder // ContentType returns the Content-Type which this marshaler is responsible for. 
- ContentType() string -} - -// Marshalers that implement contentTypeMarshaler will have their ContentTypeFromMessage method called -// to set the Content-Type header on the response -type contentTypeMarshaler interface { - // ContentTypeFromMessage returns the Content-Type this marshaler produces from the provided message - ContentTypeFromMessage(v interface{}) string + // The parameter describes the type which is being marshalled, which can sometimes + // affect the content type returned. + ContentType(v interface{}) string } // Decoder decodes a byte sequence diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go similarity index 91% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go index 8dd5c24db4279..a714de0240682 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go @@ -6,6 +6,7 @@ import ( "net/http" "google.golang.org/grpc/grpclog" + "google.golang.org/protobuf/encoding/protojson" ) // MIMEWildcard is the fallback MIME type used for requests which do not match @@ -16,7 +17,16 @@ var ( acceptHeader = http.CanonicalHeaderKey("Accept") contentTypeHeader = http.CanonicalHeaderKey("Content-Type") - defaultMarshaler = &JSONPb{OrigName: true} + defaultMarshaler = &HTTPBodyMarshaler{ + Marshaler: &JSONPb{ + MarshalOptions: protojson.MarshalOptions{ + EmitUnpopulated: true, + }, + UnmarshalOptions: protojson.UnmarshalOptions{ + DiscardUnknown: true, + }, + }, + } ) // MarshalerForRequest returns the inbound/outbound marshalers for this request. diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go similarity index 59% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go index 523a9cb43c930..46a4aabaf9555 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go @@ -2,28 +2,47 @@ package runtime import ( "context" + "errors" "fmt" "net/http" "net/textproto" "strings" - "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule" "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" +) + +// UnescapingMode defines the behavior of ServeMux when unescaping path parameters. +type UnescapingMode int + +const ( + // UnescapingModeLegacy is the default V2 behavior, which escapes the entire + // path string before doing any routing. + UnescapingModeLegacy UnescapingMode = iota + + // EscapingTypeExceptReserved unescapes all path parameters except RFC 6570 + // reserved characters. + UnescapingModeAllExceptReserved + + // EscapingTypeExceptSlash unescapes URL path parameters except path + // seperators, which will be left as "%2F". + UnescapingModeAllExceptSlash + + // URL path parameters will be fully decoded. + UnescapingModeAllCharacters + + // UnescapingModeDefault is the default escaping type. 
+ // TODO(v3): default this to UnescapingModeAllExceptReserved per grpc-httpjson-transcoding's + // reference implementation + UnescapingModeDefault = UnescapingModeLegacy ) // A HandlerFunc handles a specific pair of path pattern and HTTP method. type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) -// ErrUnknownURI is the error supplied to a custom ProtoErrorHandlerFunc when -// a request is received with a URI path that does not match any registered -// service method. -// -// Since gRPC servers return an "Unimplemented" code for requests with an -// unrecognized URI path, this error also has a gRPC "Unimplemented" code. -var ErrUnknownURI = status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented)) - // ServeMux is a request multiplexer for grpc-gateway. // It matches http requests to patterns and invokes the corresponding handler. type ServeMux struct { @@ -34,10 +53,11 @@ type ServeMux struct { incomingHeaderMatcher HeaderMatcherFunc outgoingHeaderMatcher HeaderMatcherFunc metadataAnnotators []func(context.Context, *http.Request) metadata.MD + errorHandler ErrorHandlerFunc streamErrorHandler StreamErrorHandlerFunc - protoErrorHandler ProtoErrorHandlerFunc + routingErrorHandler RoutingErrorHandlerFunc disablePathLengthFallback bool - lastMatchWins bool + unescapingMode UnescapingMode } // ServeMuxOption is an option that can be given to a ServeMux on construction. @@ -55,8 +75,16 @@ func WithForwardResponseOption(forwardResponseOption func(context.Context, http. } } +// WithEscapingType sets the escaping type. See the definitions of UnescapingMode +// for more information. +func WithUnescapingMode(mode UnescapingMode) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.unescapingMode = mode + } +} + // SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters. -// Configuring this will mean the generated swagger output is no longer correct, and it should be +// Configuring this will mean the generated OpenAPI output is no longer correct, and it should be // done with careful consideration. func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMuxOption { return func(serveMux *ServeMux) { @@ -111,21 +139,12 @@ func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) Se } } -// WithProtoErrorHandler returns a ServeMuxOption for configuring a custom error handler. +// WithErrorHandler returns a ServeMuxOption for configuring a custom error handler. // -// This can be used to handle an error as general proto message defined by gRPC. -// When this option is used, the mux uses the configured error handler instead of HTTPError and -// OtherErrorHandler. -func WithProtoErrorHandler(fn ProtoErrorHandlerFunc) ServeMuxOption { +// This can be used to configure a custom error response. +func WithErrorHandler(fn ErrorHandlerFunc) ServeMuxOption { return func(serveMux *ServeMux) { - serveMux.protoErrorHandler = fn - } -} - -// WithDisablePathLengthFallback returns a ServeMuxOption for disable path length fallback. -func WithDisablePathLengthFallback() ServeMuxOption { - return func(serveMux *ServeMux) { - serveMux.disablePathLengthFallback = true + serveMux.errorHandler = fn } } @@ -134,7 +153,7 @@ func WithDisablePathLengthFallback() ServeMuxOption { // calls. // // For stream errors that occur before any response has been written, the mux's -// ProtoErrorHandler will be invoked. 
However, once data has been written, the errors must +// ErrorHandler will be invoked. However, once data has been written, the errors must // be handled differently: they must be included in the response body. The response body's // final message will include the error details returned by the stream error handler. func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption { @@ -143,12 +162,20 @@ func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption { } } -// WithLastMatchWins returns a ServeMuxOption that will enable "last -// match wins" behavior, where if multiple path patterns match a -// request path, the last one defined in the .proto file will be used. -func WithLastMatchWins() ServeMuxOption { +// WithRoutingErrorHandler returns a ServeMuxOption for configuring a custom error handler to handle http routing errors. +// +// Method called for errors which can happen before gRPC route selected or executed. +// The following error codes: StatusMethodNotAllowed StatusNotFound StatusBadRequest +func WithRoutingErrorHandler(fn RoutingErrorHandlerFunc) ServeMuxOption { return func(serveMux *ServeMux) { - serveMux.lastMatchWins = true + serveMux.routingErrorHandler = fn + } +} + +// WithDisablePathLengthFallback returns a ServeMuxOption for disable path length fallback. +func WithDisablePathLengthFallback() ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.disablePathLengthFallback = true } } @@ -158,7 +185,10 @@ func NewServeMux(opts ...ServeMuxOption) *ServeMux { handlers: make(map[string][]handler), forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), marshalers: makeMarshalerMIMERegistry(), - streamErrorHandler: DefaultHTTPStreamErrorHandler, + errorHandler: DefaultHTTPErrorHandler, + streamErrorHandler: DefaultStreamErrorHandler, + routingErrorHandler: DefaultRoutingErrorHandler, + unescapingMode: UnescapingModeDefault, } for _, opt := range opts { @@ -180,11 +210,23 @@ func NewServeMux(opts ...ServeMuxOption) *ServeMux { // Handle associates "h" to the pair of HTTP method and path pattern. func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) { - if s.lastMatchWins { - s.handlers[meth] = append([]handler{handler{pat: pat, h: h}}, s.handlers[meth]...) - } else { - s.handlers[meth] = append(s.handlers[meth], handler{pat: pat, h: h}) + s.handlers[meth] = append([]handler{{pat: pat, h: h}}, s.handlers[meth]...) +} + +// HandlePath allows users to configure custom path handlers. +// refer: https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/inject_router/ +func (s *ServeMux) HandlePath(meth string, pathPattern string, h HandlerFunc) error { + compiler, err := httprule.Parse(pathPattern) + if err != nil { + return fmt.Errorf("parsing path pattern: %w", err) + } + tp := compiler.Compile() + pattern, err := NewPattern(tp.Version, tp.OpCodes, tp.Pool, tp.Verb) + if err != nil { + return fmt.Errorf("creating new pattern: %w", err) } + s.Handle(meth, pattern, h) + return nil } // ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path. 
@@ -193,48 +235,66 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { path := r.URL.Path if !strings.HasPrefix(path, "/") { - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.InvalidArgument, http.StatusText(http.StatusBadRequest)) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest) - } + _, outboundMarshaler := MarshalerForRequest(s, r) + s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusBadRequest) return } - components := strings.Split(path[1:], "/") - l := len(components) - var verb string - if idx := strings.LastIndex(components[l-1], ":"); idx == 0 { - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI) - } else { - OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) - } - return - } else if idx > 0 { - c := components[l-1] - components[l-1], verb = c[:idx], c[idx+1:] + // TODO(v3): remove UnescapingModeLegacy + if s.unescapingMode != UnescapingModeLegacy && r.URL.RawPath != "" { + path = r.URL.RawPath } + components := strings.Split(path[1:], "/") + if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) { r.Method = strings.ToUpper(override) if err := r.ParseForm(); err != nil { - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.InvalidArgument, err.Error()) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) - } + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, err.Error()) + s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr) return } } + + // Verb out here is to memoize for the fallback case below + var verb string + for _, h := range s.handlers[r.Method] { - pathParams, err := h.pat.Match(components, verb) + // If the pattern has a verb, explicitly look for a suffix in the last + // component that matches a colon plus the verb. This allows us to + // handle some cases that otherwise can't be correctly handled by the + // former LastIndex case, such as when the verb literal itself contains + // a colon. This should work for all cases that have run through the + // parser because we know what verb we're looking for, however, there + // are still some cases that the parser itself cannot disambiguate. See + // the comment there if interested. 
+ patVerb := h.pat.Verb() + l := len(components) + lastComponent := components[l-1] + var idx int = -1 + if patVerb != "" && strings.HasSuffix(lastComponent, ":"+patVerb) { + idx = len(lastComponent) - len(patVerb) - 1 + } + if idx == 0 { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound) + return + } + if idx > 0 { + components[l-1], verb = lastComponent[:idx], lastComponent[idx+1:] + } + + pathParams, err := h.pat.MatchAndEscape(components, verb, s.unescapingMode) if err != nil { + var mse MalformedSequenceError + if ok := errors.As(err, &mse); ok { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{ + HTTPStatus: http.StatusBadRequest, + Err: mse, + }) + } continue } h.h(w, r, pathParams) @@ -242,47 +302,43 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { } // lookup other methods to handle fallback from GET to POST and - // to determine if it is MethodNotAllowed or NotFound. + // to determine if it is NotImplemented or NotFound. for m, handlers := range s.handlers { if m == r.Method { continue } for _, h := range handlers { - pathParams, err := h.pat.Match(components, verb) + pathParams, err := h.pat.MatchAndEscape(components, verb, s.unescapingMode) if err != nil { + var mse MalformedSequenceError + if ok := errors.As(err, &mse); ok { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{ + HTTPStatus: http.StatusBadRequest, + Err: mse, + }) + } continue } // X-HTTP-Method-Override is optional. Always allow fallback to POST. if s.isPathLengthFallback(r) { if err := r.ParseForm(); err != nil { - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - sterr := status.Error(codes.InvalidArgument, err.Error()) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) - } else { - OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) - } + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, err.Error()) + s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr) return } h.h(w, r, pathParams) return } - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI) - } else { - OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) - } + _, outboundMarshaler := MarshalerForRequest(s, r) + s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusMethodNotAllowed) return } } - if s.protoErrorHandler != nil { - _, outboundMarshaler := MarshalerForRequest(s, r) - s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI) - } else { - OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) - } + _, outboundMarshaler := MarshalerForRequest(s, r) + s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound) } // GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux. 
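The verb-matching comment in the hunk above is easiest to see with a concrete value. This standalone illustration (not part of the patch; the literals are made up) shows why suffix-matching the pattern's own verb handles verbs that themselves contain a colon, which the removed strings.LastIndex split could not:

package main

import (
	"fmt"
	"strings"
)

func main() {
	lastComponent := "books:foo:bar" // last path component of the request
	patVerb := "foo:bar"             // verb declared by the pattern being tried

	// New behaviour: split only when the component ends in ":" + the pattern's verb.
	idx := -1
	if patVerb != "" && strings.HasSuffix(lastComponent, ":"+patVerb) {
		idx = len(lastComponent) - len(patVerb) - 1
	}
	fmt.Println(lastComponent[:idx], "|", lastComponent[idx+1:]) // books | foo:bar

	// Old behaviour: split at the last colon regardless of the pattern,
	// yielding verb "bar" and failing to match a pattern whose verb is "foo:bar".
	fmt.Println(strings.LastIndex(lastComponent, ":")) // 9

	// An idx of 0 (a component that is nothing but ":"+verb) is still rejected,
	// now via the routing error handler with http.StatusNotFound.
}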
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go similarity index 57% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go index 09053695da7eb..df7cb81426a0b 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go @@ -3,9 +3,10 @@ package runtime import ( "errors" "fmt" + "strconv" "strings" - "github.com/grpc-ecosystem/grpc-gateway/utilities" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" "google.golang.org/grpc/grpclog" ) @@ -14,14 +15,23 @@ var ( ErrNotMatch = errors.New("not match to the path pattern") // ErrInvalidPattern indicates that the given definition of Pattern is not valid. ErrInvalidPattern = errors.New("invalid pattern") + // ErrMalformedSequence indicates that an escape sequence was malformed. + ErrMalformedSequence = errors.New("malformed escape sequence") ) +type MalformedSequenceError string + +func (e MalformedSequenceError) Error() string { + return "malformed path escape " + strconv.Quote(string(e)) +} + type op struct { code utilities.OpCode operand int } -// Pattern is a template pattern of http request paths defined in github.com/googleapis/googleapis/google/api/http.proto. +// Pattern is a template pattern of http request paths defined in +// https://github.com/googleapis/googleapis/blob/master/google/api/http.proto type Pattern struct { // ops is a list of operations ops []op @@ -35,31 +45,14 @@ type Pattern struct { tailLen int // verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part. verb string - // assumeColonVerb indicates whether a path suffix after a final - // colon may only be interpreted as a verb. - assumeColonVerb bool } -type patternOptions struct { - assumeColonVerb bool -} - -// PatternOpt is an option for creating Patterns. -type PatternOpt func(*patternOptions) - // NewPattern returns a new Pattern from the given definition values. // "ops" is a sequence of op codes. "pool" is a constant pool. // "verb" is the verb part of the pattern. It is empty if the pattern does not have the part. // "version" must be 1 for now. // It returns an error if the given definition is invalid. 
-func NewPattern(version int, ops []int, pool []string, verb string, opts ...PatternOpt) (Pattern, error) { - options := patternOptions{ - assumeColonVerb: true, - } - for _, o := range opts { - o(&options) - } - +func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) { if version != 1 { grpclog.Infof("unsupported version: %d", version) return Pattern{}, ErrInvalidPattern @@ -111,7 +104,7 @@ func NewPattern(version int, ops []int, pool []string, verb string, opts ...Patt } stack -= op.operand if stack < 0 { - grpclog.Print("stack underflow") + grpclog.Info("stack underflow") return Pattern{}, ErrInvalidPattern } stack++ @@ -139,13 +132,12 @@ func NewPattern(version int, ops []int, pool []string, verb string, opts ...Patt typedOps = append(typedOps, op) } return Pattern{ - ops: typedOps, - pool: pool, - vars: vars, - stacksize: maxstack, - tailLen: tailLen, - verb: verb, - assumeColonVerb: options.assumeColonVerb, + ops: typedOps, + pool: pool, + vars: vars, + stacksize: maxstack, + tailLen: tailLen, + verb: verb, }, nil } @@ -157,12 +149,13 @@ func MustPattern(p Pattern, err error) Pattern { return p } -// Match examines components if it matches to the Pattern. -// If it matches, the function returns a mapping from field paths to their captured values. -// If otherwise, the function returns an error. -func (p Pattern) Match(components []string, verb string) (map[string]string, error) { +// MatchAndEscape examines components to determine if they match to a Pattern. +// MatchAndEscape will return an error if no Patterns matched or if a pattern +// matched but contained malformed escape sequences. If successful, the function +// returns a mapping from field paths to their captured values. +func (p Pattern) MatchAndEscape(components []string, verb string, unescapingMode UnescapingMode) (map[string]string, error) { if p.verb != verb { - if p.assumeColonVerb || p.verb != "" { + if p.verb != "" { return nil, ErrNotMatch } if len(components) == 0 { @@ -171,7 +164,6 @@ func (p Pattern) Match(components []string, verb string) (map[string]string, err components = append([]string{}, components...) components[len(components)-1] += ":" + verb } - verb = "" } var pos int @@ -179,6 +171,8 @@ func (p Pattern) Match(components []string, verb string) (map[string]string, err captured := make([]string, len(p.vars)) l := len(components) for _, op := range p.ops { + var err error + switch op.code { case utilities.OpNop: continue @@ -191,6 +185,10 @@ func (p Pattern) Match(components []string, verb string) (map[string]string, err if lit := p.pool[op.operand]; c != lit { return nil, ErrNotMatch } + } else if op.code == utilities.OpPush { + if c, err = unescape(c, unescapingMode, false); err != nil { + return nil, err + } } stack = append(stack, c) pos++ @@ -200,7 +198,11 @@ func (p Pattern) Match(components []string, verb string) (map[string]string, err return nil, ErrNotMatch } end -= p.tailLen - stack = append(stack, strings.Join(components[pos:end], "/")) + c := strings.Join(components[pos:end], "/") + if c, err = unescape(c, unescapingMode, true); err != nil { + return nil, err + } + stack = append(stack, c) pos = end case utilities.OpConcatN: n := op.operand @@ -222,6 +224,16 @@ func (p Pattern) Match(components []string, verb string) (map[string]string, err return bindings, nil } +// MatchAndEscape examines components to determine if they match to a Pattern. +// It will never perform per-component unescaping (see: UnescapingModeLegacy). 
+// MatchAndEscape will return an error if no Patterns matched. If successful, +// the function returns a mapping from field paths to their captured values. +// +// Deprecated: Use MatchAndEscape. +func (p Pattern) Match(components []string, verb string) (map[string]string, error) { + return p.MatchAndEscape(components, verb, UnescapingModeDefault) +} + // Verb returns the verb part of the Pattern. func (p Pattern) Verb() string { return p.verb } @@ -253,10 +265,119 @@ func (p Pattern) String() string { return "/" + segs } -// AssumeColonVerbOpt indicates whether a path suffix after a final -// colon may only be interpreted as a verb. -func AssumeColonVerbOpt(val bool) PatternOpt { - return PatternOpt(func(o *patternOptions) { - o.assumeColonVerb = val - }) +/* + * The following code is adopted and modified from Go's standard library + * and carries the attached license. + * + * Copyright 2009 The Go Authors. All rights reserved. + * Use of this source code is governed by a BSD-style + * license that can be found in the LICENSE file. + */ + +// ishex returns whether or not the given byte is a valid hex character +func ishex(c byte) bool { + switch { + case '0' <= c && c <= '9': + return true + case 'a' <= c && c <= 'f': + return true + case 'A' <= c && c <= 'F': + return true + } + return false +} + +func isRFC6570Reserved(c byte) bool { + switch c { + case '!', '#', '$', '&', '\'', '(', ')', '*', + '+', ',', '/', ':', ';', '=', '?', '@', '[', ']': + return true + default: + return false + } +} + +// unhex converts a hex point to the bit representation +func unhex(c byte) byte { + switch { + case '0' <= c && c <= '9': + return c - '0' + case 'a' <= c && c <= 'f': + return c - 'a' + 10 + case 'A' <= c && c <= 'F': + return c - 'A' + 10 + } + return 0 +} + +// shouldUnescapeWithMode returns true if the character is escapable with the +// given mode +func shouldUnescapeWithMode(c byte, mode UnescapingMode) bool { + switch mode { + case UnescapingModeAllExceptReserved: + if isRFC6570Reserved(c) { + return false + } + case UnescapingModeAllExceptSlash: + if c == '/' { + return false + } + case UnescapingModeAllCharacters: + return true + } + return true +} + +// unescape unescapes a path string using the provided mode +func unescape(s string, mode UnescapingMode, multisegment bool) (string, error) { + // TODO(v3): remove UnescapingModeLegacy + if mode == UnescapingModeLegacy { + return s, nil + } + + if !multisegment { + mode = UnescapingModeAllCharacters + } + + // Count %, check that they're well-formed. 
+ n := 0 + for i := 0; i < len(s); { + if s[i] == '%' { + n++ + if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) { + s = s[i:] + if len(s) > 3 { + s = s[:3] + } + + return "", MalformedSequenceError(s) + } + i += 3 + } else { + i++ + } + } + + if n == 0 { + return s, nil + } + + var t strings.Builder + t.Grow(len(s)) + for i := 0; i < len(s); i++ { + switch s[i] { + case '%': + c := unhex(s[i+1])<<4 | unhex(s[i+2]) + if shouldUnescapeWithMode(c, mode) { + t.WriteByte(c) + i += 2 + continue + } + fallthrough + default: + t.WriteByte(s[i]) + } + } + + return t.String(), nil } diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go similarity index 98% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go index a3151e2a5528d..d549407f20fbf 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go @@ -1,7 +1,7 @@ package runtime import ( - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" ) // StringP returns a pointer to a string whose pointee is same as the given string value. diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go new file mode 100644 index 0000000000000..fb0c84ef0cdd4 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go @@ -0,0 +1,329 @@ +package runtime + +import ( + "encoding/base64" + "errors" + "fmt" + "net/url" + "regexp" + "strconv" + "strings" + "time" + + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" + "google.golang.org/genproto/protobuf/field_mask" + "google.golang.org/grpc/grpclog" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/types/known/durationpb" + "google.golang.org/protobuf/types/known/timestamppb" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +var valuesKeyRegexp = regexp.MustCompile(`^(.*)\[(.*)\]$`) + +var currentQueryParser QueryParameterParser = &defaultQueryParser{} + +// QueryParameterParser defines interface for all query parameter parsers +type QueryParameterParser interface { + Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error +} + +// PopulateQueryParameters parses query parameters +// into "msg" using current query parser +func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { + return currentQueryParser.Parse(msg, values, filter) +} + +type defaultQueryParser struct{} + +// Parse populates "values" into "msg". +// A value is ignored if its key starts with one of the elements in "filter". +func (*defaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { + for key, values := range values { + match := valuesKeyRegexp.FindStringSubmatch(key) + if len(match) == 3 { + key = match[1] + values = append([]string{match[2]}, values...) 
+ } + fieldPath := strings.Split(key, ".") + if filter.HasCommonPrefix(fieldPath) { + continue + } + if err := populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, values); err != nil { + return err + } + } + return nil +} + +// PopulateFieldFromPath sets a value in a nested Protobuf structure. +func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error { + fieldPath := strings.Split(fieldPathString, ".") + return populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, []string{value}) +} + +func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []string, values []string) error { + if len(fieldPath) < 1 { + return errors.New("no field path") + } + if len(values) < 1 { + return errors.New("no value provided") + } + + var fieldDescriptor protoreflect.FieldDescriptor + for i, fieldName := range fieldPath { + fields := msgValue.Descriptor().Fields() + + // Get field by name + fieldDescriptor = fields.ByName(protoreflect.Name(fieldName)) + if fieldDescriptor == nil { + fieldDescriptor = fields.ByJSONName(fieldName) + if fieldDescriptor == nil { + // We're not returning an error here because this could just be + // an extra query parameter that isn't part of the request. + grpclog.Infof("field not found in %q: %q", msgValue.Descriptor().FullName(), strings.Join(fieldPath, ".")) + return nil + } + } + + // If this is the last element, we're done + if i == len(fieldPath)-1 { + break + } + + // Only singular message fields are allowed + if fieldDescriptor.Message() == nil || fieldDescriptor.Cardinality() == protoreflect.Repeated { + return fmt.Errorf("invalid path: %q is not a message", fieldName) + } + + // Get the nested message + msgValue = msgValue.Mutable(fieldDescriptor).Message() + } + + // Check if oneof already set + if of := fieldDescriptor.ContainingOneof(); of != nil { + if f := msgValue.WhichOneof(of); f != nil { + return fmt.Errorf("field already set for oneof %q", of.FullName().Name()) + } + } + + switch { + case fieldDescriptor.IsList(): + return populateRepeatedField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).List(), values) + case fieldDescriptor.IsMap(): + return populateMapField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).Map(), values) + } + + if len(values) > 1 { + return fmt.Errorf("too many values for field %q: %s", fieldDescriptor.FullName().Name(), strings.Join(values, ", ")) + } + + return populateField(fieldDescriptor, msgValue, values[0]) +} + +func populateField(fieldDescriptor protoreflect.FieldDescriptor, msgValue protoreflect.Message, value string) error { + v, err := parseField(fieldDescriptor, value) + if err != nil { + return fmt.Errorf("parsing field %q: %w", fieldDescriptor.FullName().Name(), err) + } + + msgValue.Set(fieldDescriptor, v) + return nil +} + +func populateRepeatedField(fieldDescriptor protoreflect.FieldDescriptor, list protoreflect.List, values []string) error { + for _, value := range values { + v, err := parseField(fieldDescriptor, value) + if err != nil { + return fmt.Errorf("parsing list %q: %w", fieldDescriptor.FullName().Name(), err) + } + list.Append(v) + } + + return nil +} + +func populateMapField(fieldDescriptor protoreflect.FieldDescriptor, mp protoreflect.Map, values []string) error { + if len(values) != 2 { + return fmt.Errorf("more than one value provided for key %q in map %q", values[0], fieldDescriptor.FullName()) + } + + key, err := parseField(fieldDescriptor.MapKey(), values[0]) + if err != nil { + return fmt.Errorf("parsing map key %q: %w", 
fieldDescriptor.FullName().Name(), err) + } + + value, err := parseField(fieldDescriptor.MapValue(), values[1]) + if err != nil { + return fmt.Errorf("parsing map value %q: %w", fieldDescriptor.FullName().Name(), err) + } + + mp.Set(key.MapKey(), value) + + return nil +} + +func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (protoreflect.Value, error) { + switch fieldDescriptor.Kind() { + case protoreflect.BoolKind: + v, err := strconv.ParseBool(value) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfBool(v), nil + case protoreflect.EnumKind: + enum, err := protoregistry.GlobalTypes.FindEnumByName(fieldDescriptor.Enum().FullName()) + switch { + case errors.Is(err, protoregistry.NotFound): + return protoreflect.Value{}, fmt.Errorf("enum %q is not registered", fieldDescriptor.Enum().FullName()) + case err != nil: + return protoreflect.Value{}, fmt.Errorf("failed to look up enum: %w", err) + } + // Look for enum by name + v := enum.Descriptor().Values().ByName(protoreflect.Name(value)) + if v == nil { + i, err := strconv.Atoi(value) + if err != nil { + return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value) + } + // Look for enum by number + v = enum.Descriptor().Values().ByNumber(protoreflect.EnumNumber(i)) + if v == nil { + return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value) + } + } + return protoreflect.ValueOfEnum(v.Number()), nil + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + v, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfInt32(int32(v)), nil + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfInt64(v), nil + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + v, err := strconv.ParseUint(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfUint32(uint32(v)), nil + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfUint64(v), nil + case protoreflect.FloatKind: + v, err := strconv.ParseFloat(value, 32) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfFloat32(float32(v)), nil + case protoreflect.DoubleKind: + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfFloat64(v), nil + case protoreflect.StringKind: + return protoreflect.ValueOfString(value), nil + case protoreflect.BytesKind: + v, err := base64.URLEncoding.DecodeString(value) + if err != nil { + return protoreflect.Value{}, err + } + return protoreflect.ValueOfBytes(v), nil + case protoreflect.MessageKind, protoreflect.GroupKind: + return parseMessage(fieldDescriptor.Message(), value) + default: + panic(fmt.Sprintf("unknown field kind: %v", fieldDescriptor.Kind())) + } +} + +func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (protoreflect.Value, error) { + var msg proto.Message + switch msgDescriptor.FullName() { + case "google.protobuf.Timestamp": + if value == "null" { + break + } + t, err := time.Parse(time.RFC3339Nano, value) + if err != nil { + return protoreflect.Value{}, err + } + msg = timestamppb.New(t) + case 
"google.protobuf.Duration": + if value == "null" { + break + } + d, err := time.ParseDuration(value) + if err != nil { + return protoreflect.Value{}, err + } + msg = durationpb.New(d) + case "google.protobuf.DoubleValue": + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return protoreflect.Value{}, err + } + msg = &wrapperspb.DoubleValue{Value: v} + case "google.protobuf.FloatValue": + v, err := strconv.ParseFloat(value, 32) + if err != nil { + return protoreflect.Value{}, err + } + msg = &wrapperspb.FloatValue{Value: float32(v)} + case "google.protobuf.Int64Value": + v, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } + msg = &wrapperspb.Int64Value{Value: v} + case "google.protobuf.Int32Value": + v, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } + msg = &wrapperspb.Int32Value{Value: int32(v)} + case "google.protobuf.UInt64Value": + v, err := strconv.ParseUint(value, 10, 64) + if err != nil { + return protoreflect.Value{}, err + } + msg = &wrapperspb.UInt64Value{Value: v} + case "google.protobuf.UInt32Value": + v, err := strconv.ParseUint(value, 10, 32) + if err != nil { + return protoreflect.Value{}, err + } + msg = &wrapperspb.UInt32Value{Value: uint32(v)} + case "google.protobuf.BoolValue": + v, err := strconv.ParseBool(value) + if err != nil { + return protoreflect.Value{}, err + } + msg = &wrapperspb.BoolValue{Value: v} + case "google.protobuf.StringValue": + msg = &wrapperspb.StringValue{Value: value} + case "google.protobuf.BytesValue": + v, err := base64.URLEncoding.DecodeString(value) + if err != nil { + return protoreflect.Value{}, err + } + msg = &wrapperspb.BytesValue{Value: v} + case "google.protobuf.FieldMask": + fm := &field_mask.FieldMask{} + fm.Paths = append(fm.Paths, strings.Split(value, ",")...) 
+ msg = fm + default: + return protoreflect.Value{}, fmt.Errorf("unsupported message type: %q", string(msgDescriptor.FullName())) + } + + return protoreflect.ValueOfMessage(msg.ProtoReflect()), nil +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel similarity index 59% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel index 7109d79323187..5d8d12bc42117 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel @@ -3,19 +3,25 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") package(default_visibility = ["//visibility:public"]) go_library( - name = "go_default_library", + name = "utilities", srcs = [ "doc.go", "pattern.go", "readerfactory.go", "trie.go", ], - importpath = "github.com/grpc-ecosystem/grpc-gateway/utilities", + importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/utilities", ) go_test( - name = "go_default_test", + name = "utilities_test", size = "small", srcs = ["trie_test.go"], - embed = [":go_default_library"], + deps = [":utilities"], +) + +alias( + name = "go_default_library", + actual = ":utilities", + visibility = ["//visibility:public"], ) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go similarity index 100% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go similarity index 100% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go similarity index 100% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go similarity index 98% rename from vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go index c2b7b30dd9177..af3b703d50571 100644 --- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go @@ -145,10 +145,7 @@ func (l byLex) Less(i, j int) bool { return false } } - if k < len(sj) { - return true - } - return false + return k < len(sj) } // HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence. 
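The new query.go added above is driven through runtime.PopulateQueryParameters. A small, self-contained sketch of the call follows; a well-known wrapper type stands in for a generated request message purely as an assumption for illustration:

package main

import (
	"fmt"
	"net/url"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// A generated request message would normally be used here; StringValue has a
	// single "value" field, so it keeps the sketch free of generated dependencies.
	msg := &wrapperspb.StringValue{}
	values := url.Values{"value": {"hello"}} // e.g. parsed from r.URL.Query()

	// Empty filter: no field paths are excluded from query-parameter population.
	filter := utilities.NewDoubleArray(nil)

	if err := runtime.PopulateQueryParameters(msg, values, filter); err != nil {
		panic(err)
	}
	fmt.Println(msg.GetValue()) // hello
}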
diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md index aa8cbd7ce6d73..7e6f7aeee82eb 100644 --- a/vendor/github.com/imdario/mergo/README.md +++ b/vendor/github.com/imdario/mergo/README.md @@ -8,8 +8,7 @@ [![Coverage Status][9]][10] [![Sourcegraph][11]][12] [![FOSSA Status][13]][14] - -[![GoCenter Kudos][15]][16] +[![Become my sponsor][15]][16] [1]: https://travis-ci.org/imdario/mergo.png [2]: https://travis-ci.org/imdario/mergo @@ -25,8 +24,8 @@ [12]: https://sourcegraph.com/github.com/imdario/mergo?badge [13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield [14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield -[15]: https://search.gocenter.io/api/ui/badge/github.com%2Fimdario%2Fmergo -[16]: https://search.gocenter.io/github.com/imdario/mergo +[15]: https://img.shields.io/github/sponsors/imdario +[16]: https://github.com/sponsors/imdario A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. @@ -36,11 +35,11 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the ## Status -It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). +It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild). ### Important note -Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds suppot for go modules. +Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules. Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code. @@ -51,12 +50,12 @@ If you were using Mergo before April 6th, 2015, please check your project works If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. 
:heart_eyes: Buy Me a Coffee at ko-fi.com -[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo) -[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo) Donate using Liberapay +Become my sponsor ### Mergo in the wild +- [cli/cli](https://github.com/cli/cli) - [moby/moby](https://github.com/moby/moby) - [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) - [vmware/dispatch](https://github.com/vmware/dispatch) @@ -98,6 +97,8 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont - [jnuthong/item_search](https://github.com/jnuthong/item_search) - [bukalapak/snowboard](https://github.com/bukalapak/snowboard) - [containerssh/containerssh](https://github.com/containerssh/containerssh) +- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser) +- [tjpnz/structbot](https://github.com/tjpnz/structbot) ## Install @@ -168,7 +169,7 @@ func main() { Note: if test are failing due missing package, please execute: - go get gopkg.in/yaml.v2 + go get gopkg.in/yaml.v3 ### Transformers @@ -218,7 +219,6 @@ func main() { } ``` - ## Contact me If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) @@ -227,18 +227,6 @@ If I can help you, you have an idea or you are using Mergo in your projects, don Written by [Dario Castañé](http://dario.im). -## Top Contributors - -[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0) -[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1) -[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2) -[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3) -[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4) -[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5) -[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6) -[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7) - - ## License [BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). 
diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go index 8c2a8fcd9019e..8b4e2f47a08cc 100644 --- a/vendor/github.com/imdario/mergo/merge.go +++ b/vendor/github.com/imdario/mergo/merge.go @@ -79,7 +79,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co visited[h] = &visit{addr, typ, seen} } - if config.Transformers != nil && !isEmptyValue(dst) { + if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() { if fn := config.Transformers.Transformer(dst.Type()); fn != nil { err = fn(dst, src) return diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go index 3cc926c7f6245..9fe362d476aa0 100644 --- a/vendor/github.com/imdario/mergo/mergo.go +++ b/vendor/github.com/imdario/mergo/mergo.go @@ -17,7 +17,7 @@ import ( var ( ErrNilArguments = errors.New("src and dst must not be nil") ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") - ErrNotSupported = errors.New("only structs and maps are supported") + ErrNotSupported = errors.New("only structs, maps, and slices are supported") ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") ErrNonPointerAgument = errors.New("dst must be a pointer") @@ -65,7 +65,7 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { return } vDst = reflect.ValueOf(dst).Elem() - if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { + if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice { err = ErrNotSupported return } diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml index 0af08e65e6821..7a008a4d23ebb 100644 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -3,7 +3,7 @@ before: hooks: - ./gen.sh - - go install mvdan.cc/garble@latest + - go install mvdan.cc/garble@v0.9.3 builds: - diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 9ec000ffaa9d3..958666ed89117 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -9,7 +9,6 @@ This package provides various compression algorithms. * [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding. * [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently. * [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation. -* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here. [![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories) [![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) @@ -17,6 +16,28 @@ This package provides various compression algorithms. 
# changelog +* Jan 21st, 2023 (v1.15.15) + * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739 + * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 + * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745 + * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740 + +* Jan 3rd, 2023 (v1.15.14) + + * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718 + * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720 + * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722 + * s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723 + +* Dec 11, 2022 (v1.15.13) + * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691 + * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708 + +* Oct 26, 2022 (v1.15.12) + + * zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680 + * gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683 + * Sept 26, 2022 (v1.15.11) * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678 diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go index 6f341914c67f0..dac97e58a2db1 100644 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -146,54 +146,51 @@ func (s *Scratch) compress(src []byte) error { c1.encodeZero(tt[src[ip-2]]) ip -= 2 } + src = src[:ip] // Main compression loop. switch { case !s.zeroBits && s.actualTableLog <= 8: // We can encode 4 symbols without requiring a flush. // We do not need to check if any output is 0 bits. - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encode(tt[v0]) c1.encode(tt[v1]) c2.encode(tt[v2]) c1.encode(tt[v3]) - ip -= 4 } case !s.zeroBits: // We do not need to check if any output is 0 bits. 
- for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encode(tt[v0]) c1.encode(tt[v1]) s.bw.flush32() c2.encode(tt[v2]) c1.encode(tt[v3]) - ip -= 4 } case s.actualTableLog <= 8: // We can encode 4 symbols without requiring a flush - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encodeZero(tt[v0]) c1.encodeZero(tt[v1]) c2.encodeZero(tt[v2]) c1.encodeZero(tt[v3]) - ip -= 4 } default: - for ip >= 4 { + for ; len(src) >= 4; src = src[:len(src)-4] { s.bw.flush32() - v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1] + v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] c2.encodeZero(tt[v0]) c1.encodeZero(tt[v1]) s.bw.flush32() c2.encodeZero(tt[v2]) c1.encodeZero(tt[v3]) - ip -= 4 } } @@ -459,15 +456,17 @@ func (s *Scratch) countSimple(in []byte) (max int) { for _, v := range in { s.count[v]++ } - m := uint32(0) + m, symlen := uint32(0), s.symbolLen for i, v := range s.count[:] { + if v == 0 { + continue + } if v > m { m = v } - if v > 0 { - s.symbolLen = uint16(i) + 1 - } + symlen = uint16(i) + 1 } + s.symbolLen = symlen return int(m) } diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go index 504a7be9dae3d..e36d9742f9437 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitreader.go +++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -67,7 +67,6 @@ func (b *bitReaderBytes) fillFast() { // 2 bounds checks. v := b.in[b.off-4 : b.off] - v = v[:4] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << (b.bitsRead - 32) b.bitsRead -= 32 @@ -88,8 +87,7 @@ func (b *bitReaderBytes) fill() { return } if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] + v := b.in[b.off-4 : b.off] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << (b.bitsRead - 32) b.bitsRead -= 32 @@ -179,7 +177,6 @@ func (b *bitReaderShifted) fillFast() { // 2 bounds checks. 
v := b.in[b.off-4 : b.off] - v = v[:4] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.bitsRead -= 32 @@ -200,8 +197,7 @@ func (b *bitReaderShifted) fill() { return } if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] + v := b.in[b.off-4 : b.off] low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.bitsRead -= 32 diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index 4d14542facf38..cdc94856f2cbd 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -365,29 +365,29 @@ func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { m := uint32(0) if len(s.prevTable) > 0 { for i, v := range s.count[:] { + if v == 0 { + continue + } if v > m { m = v } - if v > 0 { - s.symbolLen = uint16(i) + 1 - if i >= len(s.prevTable) { - reuse = false - } else { - if s.prevTable[i].nBits == 0 { - reuse = false - } - } + s.symbolLen = uint16(i) + 1 + if i >= len(s.prevTable) { + reuse = false + } else if s.prevTable[i].nBits == 0 { + reuse = false } } return int(m), reuse } for i, v := range s.count[:] { + if v == 0 { + continue + } if v > m { m = v } - if v > 0 { - s.symbolLen = uint16(i) + 1 - } + s.symbolLen = uint16(i) + 1 } return int(m), false } @@ -484,34 +484,35 @@ func (s *Scratch) buildCTable() error { // Different from reference implementation. huffNode0 := s.nodes[0 : huffNodesLen+1] - for huffNode[nonNullRank].count == 0 { + for huffNode[nonNullRank].count() == 0 { nonNullRank-- } lowS := int16(nonNullRank) nodeRoot := nodeNb + lowS - 1 lowN := nodeNb - huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count - huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb) + huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count()) + huffNode[lowS].setParent(nodeNb) + huffNode[lowS-1].setParent(nodeNb) nodeNb++ lowS -= 2 for n := nodeNb; n <= nodeRoot; n++ { - huffNode[n].count = 1 << 30 + huffNode[n].setCount(1 << 30) } // fake entry, strong barrier - huffNode0[0].count = 1 << 31 + huffNode0[0].setCount(1 << 31) // create parents for nodeNb <= nodeRoot { var n1, n2 int16 - if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { n1 = lowS lowS-- } else { n1 = lowN lowN++ } - if huffNode0[lowS+1].count < huffNode0[lowN+1].count { + if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { n2 = lowS lowS-- } else { @@ -519,18 +520,19 @@ func (s *Scratch) buildCTable() error { lowN++ } - huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count - huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb) + huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count()) + huffNode0[n1+1].setParent(nodeNb) + huffNode0[n2+1].setParent(nodeNb) nodeNb++ } // distribute weights (unlimited tree height) - huffNode[nodeRoot].nbBits = 0 + huffNode[nodeRoot].setNbBits(0) for n := nodeRoot - 1; n >= startNode; n-- { - huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) } for n := uint16(0); n <= nonNullRank; n++ { - huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1 + huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) } 
s.actualTableLog = s.setMaxHeight(int(nonNullRank)) maxNbBits := s.actualTableLog @@ -542,7 +544,7 @@ func (s *Scratch) buildCTable() error { var nbPerRank [tableLogMax + 1]uint16 var valPerRank [16]uint16 for _, v := range huffNode[:nonNullRank+1] { - nbPerRank[v.nbBits]++ + nbPerRank[v.nbBits()]++ } // determine stating value per rank { @@ -557,7 +559,7 @@ func (s *Scratch) buildCTable() error { // push nbBits per symbol, symbol order for _, v := range huffNode[:nonNullRank+1] { - s.cTable[v.symbol].nBits = v.nbBits + s.cTable[v.symbol()].nBits = v.nbBits() } // assign value within rank, symbol order @@ -603,12 +605,12 @@ func (s *Scratch) huffSort() { pos := rank[r].current rank[r].current++ prev := nodes[(pos-1)&huffNodesMask] - for pos > rank[r].base && c > prev.count { + for pos > rank[r].base && c > prev.count() { nodes[pos&huffNodesMask] = prev pos-- prev = nodes[(pos-1)&huffNodesMask] } - nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)} + nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n)) } } @@ -617,7 +619,7 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { huffNode := s.nodes[1 : huffNodesLen+1] //huffNode = huffNode[: huffNodesLen] - largestBits := huffNode[lastNonNull].nbBits + largestBits := huffNode[lastNonNull].nbBits() // early exit : no elt > maxNbBits if largestBits <= maxNbBits { @@ -627,14 +629,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { baseCost := int(1) << (largestBits - maxNbBits) n := uint32(lastNonNull) - for huffNode[n].nbBits > maxNbBits { - totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)) - huffNode[n].nbBits = maxNbBits + for huffNode[n].nbBits() > maxNbBits { + totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits())) + huffNode[n].setNbBits(maxNbBits) n-- } // n stops at huffNode[n].nbBits <= maxNbBits - for huffNode[n].nbBits == maxNbBits { + for huffNode[n].nbBits() == maxNbBits { n-- } // n end at index of smallest symbol using < maxNbBits @@ -655,10 +657,10 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { { currentNbBits := maxNbBits for pos := int(n); pos >= 0; pos-- { - if huffNode[pos].nbBits >= currentNbBits { + if huffNode[pos].nbBits() >= currentNbBits { continue } - currentNbBits = huffNode[pos].nbBits // < maxNbBits + currentNbBits = huffNode[pos].nbBits() // < maxNbBits rankLast[maxNbBits-currentNbBits] = uint32(pos) } } @@ -675,8 +677,8 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { if lowPos == noSymbol { break } - highTotal := huffNode[highPos].count - lowTotal := 2 * huffNode[lowPos].count + highTotal := huffNode[highPos].count() + lowTotal := 2 * huffNode[lowPos].count() if highTotal <= lowTotal { break } @@ -692,13 +694,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { // this rank is no longer empty rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] } - huffNode[rankLast[nBitsToDecrease]].nbBits++ + huffNode[rankLast[nBitsToDecrease]].setNbBits(1 + + huffNode[rankLast[nBitsToDecrease]].nbBits()) if rankLast[nBitsToDecrease] == 0 { /* special case, reached largest symbol */ rankLast[nBitsToDecrease] = noSymbol } else { rankLast[nBitsToDecrease]-- - if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease { + if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease { rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ } } @@ -706,15 +709,15 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { for totalCost < 0 { /* Sometimes, cost correction overshoot */ if rankLast[1] == 
noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ - for huffNode[n].nbBits == maxNbBits { + for huffNode[n].nbBits() == maxNbBits { n-- } - huffNode[n+1].nbBits-- + huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1) rankLast[1] = n + 1 totalCost++ continue } - huffNode[rankLast[1]+1].nbBits-- + huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1) rankLast[1]++ totalCost++ } @@ -722,9 +725,26 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { return maxNbBits } -type nodeElt struct { - count uint32 - parent uint16 - symbol byte - nbBits uint8 +// A nodeElt is the fields +// +// count uint32 +// parent uint16 +// symbol byte +// nbBits uint8 +// +// in some order, all squashed into an integer so that the compiler +// always loads and stores entire nodeElts instead of separate fields. +type nodeElt uint64 + +func makeNodeElt(count uint32, symbol byte) nodeElt { + return nodeElt(count) | nodeElt(symbol)<<48 } + +func (e *nodeElt) count() uint32 { return uint32(*e) } +func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) } +func (e *nodeElt) symbol() byte { return byte(*e >> 48) } +func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) } + +func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) } +func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 } +func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 42a237eac4abd..3c0b398c72e6e 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -61,7 +61,7 @@ func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { b, err := fse.Decompress(in[:iSize], s.fse) s.fse.Out = nil if err != nil { - return s, nil, err + return s, nil, fmt.Errorf("fse decompress returned: %w", err) } if len(b) > 255 { return s, nil, errors.New("corrupt input: output table too large") diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s index 8d2187a2ce6a3..c4c7ab2d1fe35 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s @@ -4,360 +4,349 @@ // func decompress4x_main_loop_amd64(ctx *decompress4xContext) TEXT ·decompress4x_main_loop_amd64(SB), $0-8 - XORQ DX, DX - // Preload values MOVQ ctx+0(FP), AX MOVBQZX 8(AX), DI - MOVQ 16(AX), SI - MOVQ 48(AX), BX - MOVQ 24(AX), R9 - MOVQ 32(AX), R10 - MOVQ (AX), R11 + MOVQ 16(AX), BX + MOVQ 48(AX), SI + MOVQ 24(AX), R8 + MOVQ 32(AX), R9 + MOVQ (AX), R10 // Main loop main_loop: - MOVQ SI, R8 - CMPQ R8, BX + XORL DX, DX + CMPQ BX, SI SETGE DL // br0.fillFast32() - MOVQ 32(R11), R12 - MOVBQZX 40(R11), R13 - CMPQ R13, $0x20 + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 JBE skip_fill0 - MOVQ 24(R11), AX - SUBQ $0x20, R13 + MOVQ 24(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ (R11), R14 + MOVQ (R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 24(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 24(R10) + ORQ R13, R11 - // exhausted = exhausted || (br0.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // 
exhausted += (br0.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill0: // val0 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br0.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + MOVW AX, (BX) // update the bitreader structure - MOVQ R12, 32(R11) - MOVB R13, 40(R11) - ADDQ R9, R8 + MOVQ R11, 32(R10) + MOVB R12, 40(R10) // br1.fillFast32() - MOVQ 80(R11), R12 - MOVBQZX 88(R11), R13 - CMPQ R13, $0x20 + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 JBE skip_fill1 - MOVQ 72(R11), AX - SUBQ $0x20, R13 + MOVQ 72(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ 48(R11), R14 + MOVQ 48(R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 72(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 72(R10) + ORQ R13, R11 - // exhausted = exhausted || (br1.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br1.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill1: // val0 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br1.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + MOVW AX, (BX)(R8*1) // update the bitreader structure - MOVQ R12, 80(R11) - MOVB R13, 88(R11) - ADDQ R9, R8 + MOVQ R11, 80(R10) + MOVB R12, 88(R10) // br2.fillFast32() - MOVQ 128(R11), R12 - MOVBQZX 136(R11), R13 - CMPQ R13, $0x20 + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 JBE skip_fill2 - MOVQ 120(R11), AX - SUBQ $0x20, R13 + MOVQ 120(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ 96(R11), R14 + MOVQ 96(R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 120(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 120(R10) + ORQ R13, R11 - // exhausted = exhausted || (br2.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br2.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill2: // val0 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br2.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, 
R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + MOVW AX, (BX)(R8*2) // update the bitreader structure - MOVQ R12, 128(R11) - MOVB R13, 136(R11) - ADDQ R9, R8 + MOVQ R11, 128(R10) + MOVB R12, 136(R10) // br3.fillFast32() - MOVQ 176(R11), R12 - MOVBQZX 184(R11), R13 - CMPQ R13, $0x20 + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 JBE skip_fill3 - MOVQ 168(R11), AX - SUBQ $0x20, R13 + MOVQ 168(R10), AX + SUBQ $0x20, R12 SUBQ $0x04, AX - MOVQ 144(R11), R14 + MOVQ 144(R10), R13 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R14*1), R14 - MOVQ R13, CX - SHLQ CL, R14 - MOVQ AX, 168(R11) - ORQ R14, R12 + MOVL (AX)(R13*1), R13 + MOVQ R12, CX + SHLQ CL, R13 + MOVQ AX, 168(R10) + ORQ R13, R11 - // exhausted = exhausted || (br3.off < 4) - CMPQ AX, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br3.off < 4) + CMPQ AX, $0x04 + ADCB $+0, DL skip_fill3: // val0 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br3.peekTopBits(peekBits) MOVQ DI, CX - MOVQ R12, R14 - SHRQ CL, R14 + MOVQ R11, R13 + SHRQ CL, R13 // v1 := table[val1&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v1.entry)) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // these two writes get coalesced // out[id * dstEvery + 0] = uint8(v0.entry >> 8) // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (R8) + LEAQ (R8)(R8*2), CX + MOVW AX, (BX)(CX*1) // update the bitreader structure - MOVQ R12, 176(R11) - MOVB R13, 184(R11) - ADDQ $0x02, SI + MOVQ R11, 176(R10) + MOVB R12, 184(R10) + ADDQ $0x02, BX TESTB DL, DL JZ main_loop MOVQ ctx+0(FP), AX - SUBQ 16(AX), SI - SHLQ $0x02, SI - MOVQ SI, 40(AX) + SUBQ 16(AX), BX + SHLQ $0x02, BX + MOVQ BX, 40(AX) RET // func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 - XORQ DX, DX - // Preload values MOVQ ctx+0(FP), CX MOVBQZX 8(CX), DI MOVQ 16(CX), BX MOVQ 48(CX), SI - MOVQ 24(CX), R9 - MOVQ 32(CX), R10 - MOVQ (CX), R11 + MOVQ 24(CX), R8 + MOVQ 32(CX), R9 + MOVQ (CX), R10 // Main loop main_loop: - MOVQ BX, R8 - CMPQ R8, SI + XORL DX, DX + CMPQ BX, SI SETGE DL // br0.fillFast32() - MOVQ 32(R11), R12 - MOVBQZX 40(R11), R13 - CMPQ R13, $0x20 + MOVQ 32(R10), R11 + MOVBQZX 40(R10), R12 + CMPQ R12, $0x20 JBE skip_fill0 - MOVQ 24(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ (R11), R15 + MOVQ 24(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ (R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 24(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 24(R10) + ORQ R14, R11 - // exhausted = exhausted || (br0.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br0.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill0: // val0 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // 
br0.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br0.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br0.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -365,88 +354,86 @@ skip_fill0: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + MOVL AX, (BX) // update the bitreader structure - MOVQ R12, 32(R11) - MOVB R13, 40(R11) - ADDQ R9, R8 + MOVQ R11, 32(R10) + MOVB R12, 40(R10) // br1.fillFast32() - MOVQ 80(R11), R12 - MOVBQZX 88(R11), R13 - CMPQ R13, $0x20 + MOVQ 80(R10), R11 + MOVBQZX 88(R10), R12 + CMPQ R12, $0x20 JBE skip_fill1 - MOVQ 72(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ 48(R11), R15 + MOVQ 72(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 48(R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 72(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 72(R10) + ORQ R14, R11 - // exhausted = exhausted || (br1.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br1.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill1: // val0 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br1.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br1.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -454,88 +441,86 @@ skip_fill1: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + MOVL AX, (BX)(R8*1) // update the bitreader structure - MOVQ R12, 80(R11) - MOVB R13, 88(R11) - 
ADDQ R9, R8 + MOVQ R11, 80(R10) + MOVB R12, 88(R10) // br2.fillFast32() - MOVQ 128(R11), R12 - MOVBQZX 136(R11), R13 - CMPQ R13, $0x20 + MOVQ 128(R10), R11 + MOVBQZX 136(R10), R12 + CMPQ R12, $0x20 JBE skip_fill2 - MOVQ 120(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ 96(R11), R15 + MOVQ 120(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 96(R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 120(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 120(R10) + ORQ R14, R11 - // exhausted = exhausted || (br2.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br2.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill2: // val0 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br2.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br2.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -543,88 +528,86 @@ skip_fill2: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + MOVL AX, (BX)(R8*2) // update the bitreader structure - MOVQ R12, 128(R11) - MOVB R13, 136(R11) - ADDQ R9, R8 + MOVQ R11, 128(R10) + MOVB R12, 136(R10) // br3.fillFast32() - MOVQ 176(R11), R12 - MOVBQZX 184(R11), R13 - CMPQ R13, $0x20 + MOVQ 176(R10), R11 + MOVBQZX 184(R10), R12 + CMPQ R12, $0x20 JBE skip_fill3 - MOVQ 168(R11), R14 - SUBQ $0x20, R13 - SUBQ $0x04, R14 - MOVQ 144(R11), R15 + MOVQ 168(R10), R13 + SUBQ $0x20, R12 + SUBQ $0x04, R13 + MOVQ 144(R10), R14 // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R14)(R15*1), R15 - MOVQ R13, CX - SHLQ CL, R15 - MOVQ R14, 168(R11) - ORQ R15, R12 + MOVL (R13)(R14*1), R14 + MOVQ R12, CX + SHLQ CL, R14 + MOVQ R13, 168(R10) + ORQ R14, R11 - // exhausted = exhausted || (br3.off < 4) - CMPQ R14, $0x04 - SETLT AL - ORB AL, DL + // exhausted += (br3.off < 4) + CMPQ R13, $0x04 + ADCB $+0, DL skip_fill3: // val0 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v0 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v0.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val1 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v1 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v1.entry) MOVB CH, AH - SHLQ CL, R12 - 
ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // val2 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v2 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v2.entry) MOVB CH, AH - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 // val3 := br3.peekTopBits(peekBits) - MOVQ R12, R14 + MOVQ R11, R13 MOVQ DI, CX - SHRQ CL, R14 + SHRQ CL, R13 // v3 := table[val0&mask] - MOVW (R10)(R14*2), CX + MOVW (R9)(R13*2), CX // br3.advance(uint8(v3.entry) MOVB CH, AL - SHLQ CL, R12 - ADDB CL, R13 + SHLQ CL, R11 + ADDB CL, R12 BSWAPL AX // these four writes get coalesced @@ -632,11 +615,12 @@ skip_fill3: // out[id * dstEvery + 1] = uint8(v1.entry >> 8) // out[id * dstEvery + 3] = uint8(v2.entry >> 8) // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (R8) + LEAQ (R8)(R8*2), CX + MOVL AX, (BX)(CX*1) // update the bitreader structure - MOVQ R12, 176(R11) - MOVB R13, 184(R11) + MOVQ R11, 176(R10) + MOVB R12, 184(R10) ADDQ $0x04, BX TESTB DL, DL JZ main_loop @@ -652,7 +636,7 @@ TEXT ·decompress1x_main_loop_amd64(SB), $0-8 MOVQ 16(CX), DX MOVQ 24(CX), BX CMPQ BX, $0x04 - JB error_max_decoded_size_exeeded + JB error_max_decoded_size_exceeded LEAQ (DX)(BX*1), BX MOVQ (CX), SI MOVQ (SI), R8 @@ -667,7 +651,7 @@ main_loop: // Check if we have room for 4 bytes in the output buffer LEAQ 4(DX), CX CMPQ CX, BX - JGE error_max_decoded_size_exeeded + JGE error_max_decoded_size_exceeded // Decode 4 values CMPQ R11, $0x20 @@ -744,7 +728,7 @@ loop_condition: RET // Report error -error_max_decoded_size_exeeded: +error_max_decoded_size_exceeded: MOVQ ctx+0(FP), AX MOVQ $-1, CX MOVQ CX, 40(AX) @@ -757,7 +741,7 @@ TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 MOVQ 16(CX), DX MOVQ 24(CX), BX CMPQ BX, $0x04 - JB error_max_decoded_size_exeeded + JB error_max_decoded_size_exceeded LEAQ (DX)(BX*1), BX MOVQ (CX), SI MOVQ (SI), R8 @@ -772,7 +756,7 @@ main_loop: // Check if we have room for 4 bytes in the output buffer LEAQ 4(DX), CX CMPQ CX, BX - JGE error_max_decoded_size_exeeded + JGE error_max_decoded_size_exceeded // Decode 4 values CMPQ R11, $0x20 @@ -839,7 +823,7 @@ loop_condition: RET // Report error -error_max_decoded_size_exeeded: +error_max_decoded_size_exceeded: MOVQ ctx+0(FP), AX MOVQ $-1, CX MOVQ CX, 40(AX) diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go index 298c4f8e97da8..05db94d39a4a0 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go @@ -103,6 +103,28 @@ func hash(u, shift uint32) uint32 { return (u * 0x1e35a7bd) >> shift } +// EncodeBlockInto exposes encodeBlock but checks dst size. +func EncodeBlockInto(dst, src []byte) (d int) { + if MaxEncodedLen(len(src)) > len(dst) { + return 0 + } + + // encodeBlock breaks on too big blocks, so split. + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return d +} + // encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It // assumes that the varint-encoded length of the decompressed bytes has already // been written. 
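Reading note on the huff0 fill paths above: the three-instruction `SETLT`/`ORB` sequence is replaced by `CMPQ off, $0x04` followed by `ADCB $+0, DL`. The unsigned compare leaves the carry flag set exactly when fewer than four bytes remain in that bit reader, and the add-with-carry folds the flag into the accumulator, matching the new `exhausted += (brN.off < 4)` comments. A minimal Go sketch of the same accumulation (names here are illustrative, not from the package):

```go
package sketch

// accumulateExhausted mirrors the effect of the "CMPQ off, $0x04; ADCB $+0, DL"
// pair: the unsigned compare sets the carry flag exactly when off < 4, and the
// add-with-carry folds that flag into the running counter. The main loop only
// ever tests the counter against zero, so counting readers that ran low is
// equivalent to OR-ing a boolean, with one instruction fewer per reader.
func accumulateExhausted(exhausted uint8, off uint64) uint8 {
	if off < 4 {
		exhausted++
	}
	return exhausted
}
```

The same hunks also switch the output stores to index off a single base pointer (`(BX)`, `(BX)(R8*1)`, `(BX)(R8*2)`, and `LEAQ (R8)(R8*2)` for the fourth stream) instead of advancing four separate destination pointers, which is why the per-stream `ADDQ R9, R8` updates disappear.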
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index da814715da0d6..2445bb4fe5b4e 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -82,8 +82,9 @@ type blockDec struct { err error - // Check against this crc - checkCRC []byte + // Check against this crc, if hasCRC is true. + checkCRC uint32 + hasCRC bool // Frame to use for singlethreaded decoding. // Should not be used by the decoder itself since parent may be another frame. @@ -191,16 +192,14 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } // Read block data. - if cap(b.dataStorage) < cSize { + if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize { + // byteBuf doesn't need a destination buffer. if b.lowMem || cSize > maxCompressedBlockSize { b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) } else { b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) } } - if cap(b.dst) <= maxSize { - b.dst = make([]byte, 0, maxSize+1) - } b.data, err = br.readBig(cSize, b.dataStorage) if err != nil { if debugDecoder { @@ -209,6 +208,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { } return err } + if cap(b.dst) <= maxSize { + b.dst = make([]byte, 0, maxSize+1) + } return nil } diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go index 5022e71c83630..f6a240970d467 100644 --- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go +++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go @@ -4,7 +4,6 @@ package zstd import ( - "bytes" "encoding/binary" "errors" "io" @@ -102,8 +101,8 @@ func (h *Header) Decode(in []byte) error { } h.HeaderSize += 4 b, in := in[:4], in[4:] - if !bytes.Equal(b, frameMagic) { - if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 { + if string(b) != frameMagic { + if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { return ErrMagicMismatch } if len(in) < 4 { @@ -153,7 +152,7 @@ func (h *Header) Decode(in []byte) error { } b, in = in[:size], in[size:] h.HeaderSize += int(size) - switch size { + switch len(b) { case 1: h.DictionaryID = uint32(b[0]) case 2: @@ -183,7 +182,7 @@ func (h *Header) Decode(in []byte) error { } b, in = in[:fcsSize], in[fcsSize:] h.HeaderSize += int(fcsSize) - switch fcsSize { + switch len(b) { case 1: h.FrameContentSize = uint64(b[0]) case 2: diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index 74d645f7c3802..7113e69ee3a1e 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -5,7 +5,6 @@ package zstd import ( - "bytes" "context" "encoding/binary" "io" @@ -41,8 +40,7 @@ type Decoder struct { frame *frameDec // Custom dictionaries. - // Always uses copies. - dicts map[uint32]dict + dicts map[uint32]*dict // streamWg is the waitgroup for all streams streamWg sync.WaitGroup @@ -104,7 +102,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { } // Transfer option dicts. 
- d.dicts = make(map[uint32]dict, len(d.o.dicts)) + d.dicts = make(map[uint32]*dict, len(d.o.dicts)) for _, dc := range d.o.dicts { d.dicts[dc.id] = dc } @@ -342,15 +340,8 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { } return dst, err } - if frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - return nil, ErrUnknownDictionary - } - if debugDecoder { - println("setting dict", frame.DictionaryID) - } - frame.history.setDict(&dict) + if err = d.setDict(frame); err != nil { + return nil, err } if frame.WindowSize > d.o.maxWindowSize { if debugDecoder { @@ -459,7 +450,11 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) { println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) } - if !d.o.ignoreChecksum && len(next.b) > 0 { + if d.o.ignoreChecksum { + return true + } + + if len(next.b) > 0 { n, err := d.current.crc.Write(next.b) if err == nil { if n != len(next.b) { @@ -467,18 +462,16 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) { } } } - if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 { - got := d.current.crc.Sum64() - var tmp [4]byte - binary.LittleEndian.PutUint32(tmp[:], uint32(got)) - if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) { + if next.err == nil && next.d != nil && next.d.hasCRC { + got := uint32(d.current.crc.Sum64()) + if got != next.d.checkCRC { if debugDecoder { - println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)") + printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC) } d.current.err = ErrCRCMismatch } else { if debugDecoder { - println("CRC ok", tmp[:]) + printf("CRC ok %08x\n", got) } } } @@ -494,18 +487,12 @@ func (d *Decoder) nextBlockSync() (ok bool) { if !d.syncStream.inFrame { d.frame.history.reset() d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err == nil { + d.current.err = d.setDict(d.frame) + } if d.current.err != nil { return false } - if d.frame.DictionaryID != nil { - dict, ok := d.dicts[*d.frame.DictionaryID] - if !ok { - d.current.err = ErrUnknownDictionary - return false - } else { - d.frame.history.setDict(&dict) - } - } if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { d.current.err = ErrDecoderSizeExceeded return false @@ -864,13 +851,8 @@ decodeStream: if debugDecoder && err != nil { println("Frame decoder returned", err) } - if err == nil && frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - err = ErrUnknownDictionary - } else { - frame.history.setDict(&dict) - } + if err == nil { + err = d.setDict(frame) } if err == nil && d.frame.WindowSize > d.o.maxWindowSize { if debugDecoder { @@ -918,18 +900,22 @@ decodeStream: println("next block returned error:", err) } dec.err = err - dec.checkCRC = nil + dec.hasCRC = false if dec.Last && frame.HasCheckSum && err == nil { crc, err := frame.rawInput.readSmall(4) - if err != nil { + if len(crc) < 4 { + if err == nil { + err = io.ErrUnexpectedEOF + + } println("CRC missing?", err) dec.err = err - } - var tmp [4]byte - copy(tmp[:], crc) - dec.checkCRC = tmp[:] - if debugDecoder { - println("found crc to check:", dec.checkCRC) + } else { + dec.checkCRC = binary.LittleEndian.Uint32(crc) + dec.hasCRC = true + if debugDecoder { + printf("found crc to check: %08x\n", dec.checkCRC) + } } } err = dec.err @@ -948,3 +934,20 @@ decodeStream: hist.reset() d.frame.history.b = frameHistCache } + +func (d *Decoder) setDict(frame *frameDec) (err error) { + 
dict, ok := d.dicts[frame.DictionaryID] + if ok { + if debugDecoder { + println("setting dict", frame.DictionaryID) + } + frame.history.setDict(dict) + } else if frame.DictionaryID != 0 { + // A zero or missing dictionary id is ambiguous: + // either dictionary zero, or no dictionary. In particular, + // zstd --patch-from uses this id for the source file, + // so only return an error if the dictionary id is not zero. + err = ErrUnknownDictionary + } + return err +} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go index f42448e69c951..07a90dd7af309 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -6,6 +6,8 @@ package zstd import ( "errors" + "fmt" + "math/bits" "runtime" ) @@ -18,7 +20,7 @@ type decoderOptions struct { concurrent int maxDecodedSize uint64 maxWindowSize uint64 - dicts []dict + dicts []*dict ignoreChecksum bool limitToCap bool decodeBufsBelow int @@ -85,7 +87,13 @@ func WithDecoderMaxMemory(n uint64) DOption { } // WithDecoderDicts allows to register one or more dictionaries for the decoder. -// If several dictionaries with the same ID is provided the last one will be used. +// +// Each slice in dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// +// If several dictionaries with the same ID are provided, the last one will be used. +// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format func WithDecoderDicts(dicts ...[]byte) DOption { return func(o *decoderOptions) error { for _, b := range dicts { @@ -93,12 +101,24 @@ func WithDecoderDicts(dicts ...[]byte) DOption { if err != nil { return err } - o.dicts = append(o.dicts, *d) + o.dicts = append(o.dicts, d) } return nil } } +// WithEncoderDictRaw registers a dictionary that may be used by the decoder. +// The slice content can be arbitrary data. +func WithDecoderDictRaw(id uint32, content []byte) DOption { + return func(o *decoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}) + return nil + } +} + // WithDecoderMaxWindow allows to set a maximum window size for decodes. // This allows rejecting packets that will cause big memory usage. // The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index a36ae83ef579d..ca0951452e61b 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -1,7 +1,6 @@ package zstd import ( - "bytes" "encoding/binary" "errors" "fmt" @@ -20,7 +19,10 @@ type dict struct { content []byte } -var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec} +const dictMagic = "\x37\xa4\x30\xec" + +// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB. +const dictMaxLength = 1 << 31 // ID returns the dictionary id or 0 if d is nil. func (d *dict) ID() uint32 { @@ -30,14 +32,38 @@ func (d *dict) ID() uint32 { return d.id } -// DictContentSize returns the dictionary content size or 0 if d is nil. 
-func (d *dict) DictContentSize() int { +// ContentSize returns the dictionary content size or 0 if d is nil. +func (d *dict) ContentSize() int { if d == nil { return 0 } return len(d.content) } +// Content returns the dictionary content. +func (d *dict) Content() []byte { + if d == nil { + return nil + } + return d.content +} + +// Offsets returns the initial offsets. +func (d *dict) Offsets() [3]int { + if d == nil { + return [3]int{} + } + return d.offsets +} + +// LitEncoder returns the literal encoder. +func (d *dict) LitEncoder() *huff0.Scratch { + if d == nil { + return nil + } + return d.litEnc +} + // Load a dictionary as described in // https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format func loadDict(b []byte) (*dict, error) { @@ -50,7 +76,7 @@ func loadDict(b []byte) (*dict, error) { ofDec: sequenceDec{fse: &fseDecoder{}}, mlDec: sequenceDec{fse: &fseDecoder{}}, } - if !bytes.Equal(b[:4], dictMagic[:]) { + if string(b[:4]) != dictMagic { return nil, ErrMagicMismatch } d.id = binary.LittleEndian.Uint32(b[4:8]) @@ -62,7 +88,7 @@ func loadDict(b []byte) (*dict, error) { var err error d.litEnc, b, err = huff0.ReadTable(b[8:], nil) if err != nil { - return nil, err + return nil, fmt.Errorf("loading literal table: %w", err) } d.litEnc.Reuse = huff0.ReusePolicyMust @@ -120,3 +146,16 @@ func loadDict(b []byte) (*dict, error) { return &d, nil } + +// InspectDictionary loads a zstd dictionary and provides functions to inspect the content. +func InspectDictionary(b []byte) (interface { + ID() uint32 + ContentSize() int + Content() []byte + Offsets() [3]int + LitEncoder() *huff0.Scratch +}, error) { + initPredefined() + d, err := loadDict(b) + return d, err +} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go index 15ae8ee807749..e008b99298a58 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -16,6 +16,7 @@ type fastBase struct { cur int32 // maximum offset. Should be at least 2x block size. maxMatchOff int32 + bufferReset int32 hist []byte crc *xxhash.Digest tmp [8]byte @@ -56,8 +57,8 @@ func (e *fastBase) Block() *blockEnc { } func (e *fastBase) addBlock(src []byte) int32 { - if debugAsserts && e.cur > bufferReset { - panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset)) + if debugAsserts && e.cur > e.bufferReset { + panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset)) } // check if we have space already if len(e.hist)+len(src) > cap(e.hist) { @@ -126,24 +127,7 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 { panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) } } - a := src[s:] - b := src[t:] - b = b[:len(a)] - end := int32((len(a) >> 3) << 3) - for i := int32(0); i < end; i += 8 { - if diff := load6432(a, i) ^ load6432(b, i); diff != 0 { - return i + int32(bits.TrailingZeros64(diff)>>3) - } - } - - a = a[end:] - b = b[end:] - for i := range a { - if a[i] != b[i] { - return int32(i) + end - } - } - return int32(len(a)) + end + return int32(matchLen(src[s:], src[t:])) } // Reset the encoding table. 
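In the enc_base.go hunk above, `fastBase.matchlen` now delegates to a shared `matchLen` helper instead of the inline loop that XOR-ed eight bytes at a time and counted trailing zero bits to find the first mismatch. A self-contained sketch of that technique follows; it is illustrative only, not the package's actual helper:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLenSketch returns the number of leading bytes a and b have in common.
// It uses the technique the removed inline loop relied on: XOR eight bytes at
// a time and use the trailing-zero count of the first non-zero difference to
// locate the first mismatching byte.
func matchLenSketch(a, b []byte) int {
	if len(b) < len(a) {
		a = a[:len(b)]
	}
	var n int
	for len(a) >= 8 {
		diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
		if diff != 0 {
			return n + bits.TrailingZeros64(diff)>>3
		}
		a, b = a[8:], b[8:]
		n += 8
	}
	for i := range a {
		if a[i] != b[i] {
			return n + i
		}
	}
	return n + len(a)
}

func main() {
	fmt.Println(matchLenSketch([]byte("zstandard!"), []byte("zstandarD!"))) // prints 8
}
```

The same file also threads the new `bufferReset` field through `addBlock`, so the wraparound threshold becomes a per-encoder value rather than a package constant, consistent with the `math.MaxInt32 - int32(o.windowSize*2)` initializers added in encoder_options.go further down.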
@@ -165,13 +149,13 @@ func (e *fastBase) resetBase(d *dict, singleBlock bool) { if singleBlock { e.lowMem = true } - e.ensureHist(d.DictContentSize() + maxCompressedBlockSize) + e.ensureHist(d.ContentSize() + maxCompressedBlockSize) e.lowMem = low } // We offset current position so everything will be out of reach. // If above reset line, history will be purged. - if e.cur < bufferReset { + if e.cur < e.bufferReset { e.cur += e.maxMatchOff + int32(len(e.hist)) } e.hist = e.hist[:0] diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go index dbbb88d92b391..830f5ba74a222 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go @@ -85,14 +85,10 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = prevEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = prevEntry{} - } + e.table = [bestShortTableSize]prevEntry{} + e.longTable = [bestLongTableSize]prevEntry{} e.cur = e.maxMatchOff break } @@ -193,8 +189,8 @@ encodeLoop: panic("offset0 was 0") } - bestOf := func(a, b match) match { - if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 { + bestOf := func(a, b *match) *match { + if a.est-b.est+(a.s-b.s)*bitsPerByte>>10 < 0 { return a } return b @@ -220,22 +216,26 @@ encodeLoop: return m } - best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)) + m1 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1) + m2 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1) + m3 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1) + m4 := matchAt(candidateS.prev-e.cur, s, uint32(cv), -1) + best := bestOf(bestOf(&m1, &m2), bestOf(&m3, &m4)) if canRepeat && best.length < goodEnough { cv32 := uint32(cv >> 8) spp := s + 1 - best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) - best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2)) - best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) + m1 := matchAt(spp-offset1, spp, cv32, 1) + m2 := matchAt(spp-offset2, spp, cv32, 2) + m3 := matchAt(spp-offset3, spp, cv32, 3) + best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3)) if best.length > 0 { cv32 = uint32(cv >> 24) spp += 2 - best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1)) - best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2)) - best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3)) + m1 := matchAt(spp-offset1, spp, cv32, 1) + m2 := matchAt(spp-offset2, spp, cv32, 2) + m3 := matchAt(spp-offset3, spp, cv32, 3) + best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3)) } } // Load next and check... 
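In the enc_best.go hunk above, `bestOf` now takes `*match` and folds the two position-adjusted cost estimates into a single signed comparison, and each `matchAt` result is stored in a named local (`m1` through `m5`) so its address can be taken without forcing an allocation. A hedged sketch of the comparison with a simplified struct; `bitsPerByte` and the field set here are assumptions, not the package's exact definitions:

```go
package sketch

// Assumed, simplified stand-ins; the package's real match struct and
// bitsPerByte constant may differ.
const bitsPerByte = 8

type match struct {
	s      int32 // start position of the match in src
	length int32 // match length
	est    int32 // estimated encoded cost in bits
}

// bestOf returns a when its cost estimate, charged with the scaled difference
// in start positions, is strictly lower than b's; otherwise b. Passing
// pointers avoids copying the struct on every comparison, and the single
// signed expression replaces computing two separately adjusted estimates.
func bestOf(a, b *match) *match {
	if a.est-b.est+(a.s-b.s)*bitsPerByte>>10 < 0 {
		return a
	}
	return b
}
```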
@@ -262,26 +262,33 @@ encodeLoop: candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] // Short at s+1 - best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)) + m1 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1) // Long at s+1, s+2 - best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)) - best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)) - best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)) + m2 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1) + m3 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1) + m4 := matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1) + m5 := matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1) + best = bestOf(bestOf(bestOf(best, &m1), &m2), bestOf(bestOf(&m3, &m4), &m5)) if false { // Short at s+3. // Too often worse... - best = bestOf(best, matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)) + m := matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1) + best = bestOf(best, &m) } // See if we can find a better match by checking where the current best ends. // Use that offset to see if we can find a better full match. if sAt := best.s + best.length; sAt < sLimit { nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) candidateEnd := e.longTable[nextHashL] - if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 { - bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1)) - if pos := candidateEnd.prev - e.cur - best.length; pos >= 0 { - bestEnd = bestOf(bestEnd, matchAt(pos, best.s, load3232(src, best.s), -1)) + // Start check at a fixed offset to allow for a few mismatches. + // For this compression level 2 yields the best results. + const skipBeginning = 2 + if pos := candidateEnd.offset - e.cur - best.length + skipBeginning; pos >= 0 { + m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + bestEnd := bestOf(best, &m) + if pos := candidateEnd.prev - e.cur - best.length + skipBeginning; pos >= 0 { + m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) + bestEnd = bestOf(bestEnd, &m) } best = bestEnd } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index d70e3fd3d3e99..8582f31a7cc4c 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -62,14 +62,10 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = prevEntry{} - } + e.table = [betterShortTableSize]tableEntry{} + e.longTable = [betterLongTableSize]prevEntry{} e.cur = e.maxMatchOff break } @@ -587,7 +583,7 @@ func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. 
- for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { for i := range e.table[:] { e.table[i] = tableEntry{} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index 1f4a9a245563b..7d425109adb85 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -44,14 +44,10 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = tableEntry{} - } + e.table = [dFastShortTableSize]tableEntry{} + e.longTable = [dFastLongTableSize]tableEntry{} e.cur = e.maxMatchOff break } @@ -388,7 +384,7 @@ func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - if e.cur >= bufferReset { + if e.cur >= e.bufferReset { for i := range e.table[:] { e.table[i] = tableEntry{} } @@ -685,7 +681,7 @@ encodeLoop: } // We do not store history, so we must offset e.cur to avoid false matches for next user. - if e.cur < bufferReset { + if e.cur < e.bufferReset { e.cur += int32(len(src)) } } @@ -700,7 +696,7 @@ func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { for i := range e.table[:] { e.table[i] = tableEntry{} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index 202636db05ea1..315b1a8f2f69c 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -43,7 +43,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) { ) // Protect against e.cur wraparound. - for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { for i := range e.table[:] { e.table[i] = tableEntry{} @@ -310,7 +310,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { } // Protect against e.cur wraparound. - if e.cur >= bufferReset { + if e.cur >= e.bufferReset { for i := range e.table[:] { e.table[i] = tableEntry{} } @@ -538,7 +538,7 @@ encodeLoop: println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) } // We do not store history, so we must offset e.cur to avoid false matches for next user. - if e.cur < bufferReset { + if e.cur < e.bufferReset { e.cur += int32(len(src)) } } @@ -555,11 +555,9 @@ func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) { return } // Protect against e.cur wraparound. 
- for e.cur >= bufferReset { + for e.cur >= e.bufferReset-int32(len(e.hist)) { if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } + e.table = [tableSize]tableEntry{} e.cur = e.maxMatchOff break } diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index 7aaaedb23e58c..65c6c36dc13a2 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -8,6 +8,7 @@ import ( "crypto/rand" "fmt" "io" + "math" rdebug "runtime/debug" "sync" @@ -639,3 +640,37 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } return dst } + +// MaxEncodedSize returns the expected maximum +// size of an encoded block or stream. +func (e *Encoder) MaxEncodedSize(size int) int { + frameHeader := 4 + 2 // magic + frame header & window descriptor + if e.o.dict != nil { + frameHeader += 4 + } + // Frame content size: + if size < 256 { + frameHeader++ + } else if size < 65536+256 { + frameHeader += 2 + } else if size < math.MaxInt32 { + frameHeader += 4 + } else { + frameHeader += 8 + } + // Final crc + if e.o.crc { + frameHeader += 4 + } + + // Max overhead is 3 bytes/block. + // There cannot be 0 blocks. + blocks := (size + e.o.blockSize) / e.o.blockSize + + // Combine, add padding. + maxSz := frameHeader + 3*blocks + size + if e.o.pad > 1 { + maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad)) + } + return maxSz +} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index a7c5e1aac4323..8e15be2f7f8ad 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -3,6 +3,8 @@ package zstd import ( "errors" "fmt" + "math" + "math/bits" "runtime" "strings" ) @@ -47,22 +49,22 @@ func (o encoderOptions) encoder() encoder { switch o.level { case SpeedFastest: if o.dict != nil { - return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} } - return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} case SpeedDefault: if o.dict != nil { - return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}} + return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}} } - return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} case SpeedBetterCompression: if o.dict != nil { - return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}} + return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: 
fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} } - return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} case SpeedBestCompression: - return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}} + return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} } panic("unknown compression level") } @@ -304,7 +306,13 @@ func WithLowerEncoderMem(b bool) EOption { } // WithEncoderDict allows to register a dictionary that will be used for the encode. +// +// The slice dict must be in the [dictionary format] produced by +// "zstd --train" from the Zstandard reference implementation. +// // The encoder *may* choose to use no dictionary instead for certain payloads. +// +// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format func WithEncoderDict(dict []byte) EOption { return func(o *encoderOptions) error { d, err := loadDict(dict) @@ -315,3 +323,17 @@ func WithEncoderDict(dict []byte) EOption { return nil } } + +// WithEncoderDictRaw registers a dictionary that may be used by the encoder. +// +// The slice content may contain arbitrary data. It will be used as an initial +// history. +func WithEncoderDictRaw(id uint32, content []byte) EOption { + return func(o *encoderOptions) error { + if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { + return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) + } + o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}} + return nil + } +} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 2c0affcd85c74..d8e8a05bd73a1 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -5,7 +5,7 @@ package zstd import ( - "bytes" + "encoding/binary" "encoding/hex" "errors" "io" @@ -29,7 +29,7 @@ type frameDec struct { FrameContentSize uint64 - DictionaryID *uint32 + DictionaryID uint32 HasCheckSum bool SingleSegment bool } @@ -43,9 +43,9 @@ const ( MaxWindowSize = 1 << 29 ) -var ( - frameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd} - skippableFrameMagic = []byte{0x2a, 0x4d, 0x18} +const ( + frameMagic = "\x28\xb5\x2f\xfd" + skippableFrameMagic = "\x2a\x4d\x18" ) func newFrameDec(o decoderOptions) *frameDec { @@ -89,9 +89,9 @@ func (d *frameDec) reset(br byteBuffer) error { copy(signature[1:], b) } - if !bytes.Equal(signature[1:4], skippableFrameMagic) || signature[0]&0xf0 != 0x50 { + if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 { if debugDecoder { - println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString(skippableFrameMagic)) + println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic))) } // Break if not skippable frame. 
break @@ -114,9 +114,9 @@ func (d *frameDec) reset(br byteBuffer) error { return err } } - if !bytes.Equal(signature[:], frameMagic) { + if string(signature[:]) != frameMagic { if debugDecoder { - println("Got magic numbers: ", signature, "want:", frameMagic) + println("Got magic numbers: ", signature, "want:", []byte(frameMagic)) } return ErrMagicMismatch } @@ -155,7 +155,7 @@ func (d *frameDec) reset(br byteBuffer) error { // Read Dictionary_ID // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - d.DictionaryID = nil + d.DictionaryID = 0 if size := fhd & 3; size != 0 { if size == 3 { size = 4 @@ -167,7 +167,7 @@ func (d *frameDec) reset(br byteBuffer) error { return err } var id uint32 - switch size { + switch len(b) { case 1: id = uint32(b[0]) case 2: @@ -178,11 +178,7 @@ func (d *frameDec) reset(br byteBuffer) error { if debugDecoder { println("Dict size", size, "ID:", id) } - if id > 0 { - // ID 0 means "sorry, no dictionary anyway". - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format - d.DictionaryID = &id - } + d.DictionaryID = id } // Read Frame_Content_Size @@ -204,7 +200,7 @@ func (d *frameDec) reset(br byteBuffer) error { println("Reading Frame content", err) return err } - switch fcsSize { + switch len(b) { case 1: d.FrameContentSize = uint64(b[0]) case 2: @@ -305,7 +301,7 @@ func (d *frameDec) checkCRC() error { } // We can overwrite upper tmp now - want, err := d.rawInput.readSmall(4) + buf, err := d.rawInput.readSmall(4) if err != nil { println("CRC missing?", err) return err @@ -315,22 +311,17 @@ func (d *frameDec) checkCRC() error { return nil } - var tmp [4]byte - got := d.crc.Sum64() - // Flip to match file order. - tmp[0] = byte(got >> 0) - tmp[1] = byte(got >> 8) - tmp[2] = byte(got >> 16) - tmp[3] = byte(got >> 24) + want := binary.LittleEndian.Uint32(buf[:4]) + got := uint32(d.crc.Sum64()) - if !bytes.Equal(tmp[:], want) { + if got != want { if debugDecoder { - println("CRC Check Failed:", tmp[:], "!=", want) + printf("CRC check failed: got %08x, want %08x\n", got, want) } return ErrCRCMismatch } if debugDecoder { - println("CRC ok", tmp[:]) + printf("CRC ok %08x\n", got) } return nil } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md index 69aa3bb587c8d..777290d44ced7 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md @@ -2,12 +2,7 @@ VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. - -[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) -[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) - -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a high-quality hashing algorithm that is much faster than anything in the Go standard library. @@ -28,31 +23,49 @@ func (*Digest) WriteString(string) (int, error) func (*Digest) Sum64() uint64 ``` -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. +The package is written with optimized pure Go and also contains even faster +assembly implementations for amd64 and arm64. 
If desired, the `purego` build tag +opts into using the Go code even on those architectures. + +[xxHash]: http://cyan4973.github.io/xxHash/ + +## Compatibility + +This package is in a module and the latest code is in version 2 of the module. +You need a version of Go with at least "minimal module compatibility" to use +github.com/cespare/xxhash/v2: + +* 1.9.7+ for Go 1.9 +* 1.10.3+ for Go 1.10 +* Go 1.11 or later + +I recommend using the latest release of Go. ## Benchmarks Here are some quick benchmarks comparing the pure-Go and assembly implementations of Sum64. -| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | +| input size | purego | asm | +| ---------- | --------- | --------- | +| 4 B | 1.3 GB/s | 1.2 GB/s | +| 16 B | 2.9 GB/s | 3.5 GB/s | +| 100 B | 6.9 GB/s | 8.1 GB/s | +| 4 KB | 11.7 GB/s | 16.7 GB/s | +| 10 MB | 12.0 GB/s | 17.3 GB/s | -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: +These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C +CPU using the following commands under Go 1.19.2: ``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' +benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') +benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') ``` ## Projects using this package - [InfluxDB](https://github.com/influxdata/influxdb) - [Prometheus](https://github.com/prometheus/prometheus) +- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) - [FreeCache](https://github.com/coocood/freecache) +- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go index 2c112a0ab1c1b..fc40c820016ca 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go @@ -18,19 +18,11 @@ const ( prime5 uint64 = 2870177450012600261 ) -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) +// Store the primes in an array as well. +// +// The consts are used when possible in Go code to avoid MOVs but we need a +// contiguous array of the assembly code. +var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} // Digest implements hash.Hash64. type Digest struct { @@ -52,10 +44,10 @@ func New() *Digest { // Reset clears the Digest's state so that it can be reused. 
func (d *Digest) Reset() { - d.v1 = prime1v + prime2 + d.v1 = primes[0] + prime2 d.v2 = prime2 d.v3 = 0 - d.v4 = -prime1v + d.v4 = -primes[0] d.total = 0 d.n = 0 } @@ -71,21 +63,23 @@ func (d *Digest) Write(b []byte) (n int, err error) { n = len(b) d.total += uint64(n) + memleft := d.mem[d.n&(len(d.mem)-1):] + if d.n+n < 32 { // This new data doesn't even fill the current block. - copy(d.mem[d.n:], b) + copy(memleft, b) d.n += n return } if d.n > 0 { // Finish off the partial block. - copy(d.mem[d.n:], b) + c := copy(memleft, b) d.v1 = round(d.v1, u64(d.mem[0:8])) d.v2 = round(d.v2, u64(d.mem[8:16])) d.v3 = round(d.v3, u64(d.mem[16:24])) d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] + b = b[c:] d.n = 0 } @@ -135,21 +129,20 @@ func (d *Digest) Sum64() uint64 { h += d.total - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) + b := d.mem[:d.n&(len(d.mem)-1)] + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for i < end { - h ^= uint64(d.mem[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 - i++ } h ^= h >> 33 diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s index cea1785619709..ddb63aa91b14d 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s @@ -1,3 +1,4 @@ +//go:build !appengine && gc && !purego && !noasm // +build !appengine // +build gc // +build !purego @@ -5,212 +6,205 @@ #include "textflag.h" -// Register allocation: -// AX h -// SI pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// DI prime4v - -// round reads from and advances the buffer pointer in SI. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (SI), R12 \ - ADDQ $8, SI \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ DI, acc +// Registers: +#define h AX +#define d AX +#define p SI // pointer to advance through b +#define n DX +#define end BX // loop end +#define v1 R8 +#define v2 R9 +#define v3 R10 +#define v4 R11 +#define x R12 +#define prime1 R13 +#define prime2 R14 +#define prime4 DI + +#define round(acc, x) \ + IMULQ prime2, x \ + ADDQ x, acc \ + ROLQ $31, acc \ + IMULQ prime1, acc + +// round0 performs the operation x = round(0, x). +#define round0(x) \ + IMULQ prime2, x \ + ROLQ $31, x \ + IMULQ prime1, x + +// mergeRound applies a merge round on the two registers acc and x. +// It assumes that prime1, prime2, and prime4 have been loaded. +#define mergeRound(acc, x) \ + round0(x) \ + XORQ x, acc \ + IMULQ prime1, acc \ + ADDQ prime4, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that there is at least one block +// to process. 
+#define blockLoop() \ +loop: \ + MOVQ +0(p), x \ + round(v1, x) \ + MOVQ +8(p), x \ + round(v2, x) \ + MOVQ +16(p), x \ + round(v3, x) \ + MOVQ +24(p), x \ + round(v4, x) \ + ADDQ $32, p \ + CMPQ p, end \ + JLE loop // func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), DI + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 + MOVQ ·primes+24(SB), prime4 // Load slice. - MOVQ b_base+0(FP), SI - MOVQ b_len+8(FP), DX - LEAQ (SI)(DX*1), BX + MOVQ b_base+0(FP), p + MOVQ b_len+8(FP), n + LEAQ (p)(n*1), end // The first loop limit will be len(b)-32. - SUBQ $32, BX + SUBQ $32, end // Check whether we have at least one block. - CMPQ DX, $32 + CMPQ n, $32 JLT noBlocks // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until SI > BX. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) + MOVQ prime1, v1 + ADDQ prime2, v1 + MOVQ prime2, v2 + XORQ v3, v3 + XORQ v4, v4 + SUBQ prime1, v4 + + blockLoop() + + MOVQ v1, h + ROLQ $1, h + MOVQ v2, x + ROLQ $7, x + ADDQ x, h + MOVQ v3, x + ROLQ $12, x + ADDQ x, h + MOVQ v4, x + ROLQ $18, x + ADDQ x, h + + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) JMP afterBlocks noBlocks: - MOVQ ·prime5v(SB), AX + MOVQ ·primes+32(SB), h afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8. - ADDQ $24, BX - - CMPQ SI, BX - JG fourByte - -wordLoop: - // Calculate k1. - MOVQ (SI), R8 - ADDQ $8, SI - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ DI, AX - - CMPQ SI, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ SI, BX - JG singles - - MOVL (SI), R8 - ADDQ $4, SI - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ SI, BX + ADDQ n, h + + ADDQ $24, end + CMPQ p, end + JG try4 + +loop8: + MOVQ (p), x + ADDQ $8, p + round0(x) + XORQ x, h + ROLQ $27, h + IMULQ prime1, h + ADDQ prime4, h + + CMPQ p, end + JLE loop8 + +try4: + ADDQ $4, end + CMPQ p, end + JG try1 + + MOVL (p), x + ADDQ $4, p + IMULQ prime1, x + XORQ x, h + + ROLQ $23, h + IMULQ prime2, h + ADDQ ·primes+16(SB), h + +try1: + ADDQ $4, end + CMPQ p, end JGE finalize -singlesLoop: - MOVBQZX (SI), R12 - ADDQ $1, SI - IMULQ ·prime5v(SB), R12 - XORQ R12, AX +loop1: + MOVBQZX (p), x + ADDQ $1, p + IMULQ ·primes+32(SB), x + XORQ x, h + ROLQ $11, h + IMULQ prime1, h - ROLQ $11, AX - IMULQ R13, AX - - CMPQ SI, BX - JL singlesLoop + CMPQ p, end + JL loop1 finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) + MOVQ h, x + SHRQ $33, x + XORQ x, h + IMULQ prime2, h + MOVQ h, x + SHRQ $29, x + XORQ x, h + IMULQ ·primes+16(SB), h + MOVQ h, x + SHRQ $32, x + XORQ x, h + + MOVQ h, ret+24(FP) RET -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. 
- // func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 + MOVQ ·primes+0(SB), prime1 + MOVQ ·primes+8(SB), prime2 // Load slice. - MOVQ b_base+8(FP), SI - MOVQ b_len+16(FP), DX - LEAQ (SI)(DX*1), BX - SUBQ $32, BX + MOVQ b_base+8(FP), p + MOVQ b_len+16(FP), n + LEAQ (p)(n*1), end + SUBQ $32, end // Load vN from d. - MOVQ d+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 + MOVQ s+0(FP), d + MOVQ 0(d), v1 + MOVQ 8(d), v2 + MOVQ 16(d), v3 + MOVQ 24(d), v4 // We don't need to check the loop condition here; this function is // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ SI, BX - JLE blockLoop + blockLoop() // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // The number of bytes written is SI minus the old base pointer. - SUBQ b_base+8(FP), SI - MOVQ SI, ret+32(FP) + MOVQ v1, 0(d) + MOVQ v2, 8(d) + MOVQ v3, 16(d) + MOVQ v4, 24(d) + + // The number of bytes written is p minus the old base pointer. + SUBQ b_base+8(FP), p + MOVQ p, ret+32(FP) RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s index 4d64a17d69c12..17901e080406d 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s @@ -1,13 +1,17 @@ -// +build gc,!purego,!noasm +//go:build !appengine && gc && !purego && !noasm +// +build !appengine +// +build gc +// +build !purego +// +build !noasm #include "textflag.h" -// Register allocation. +// Registers: #define digest R1 -#define h R2 // Return value. -#define p R3 // Input pointer. -#define len R4 -#define nblocks R5 // len / 32. +#define h R2 // return value +#define p R3 // input pointer +#define n R4 // input length +#define nblocks R5 // n / 32 #define prime1 R7 #define prime2 R8 #define prime3 R9 @@ -25,60 +29,52 @@ #define round(acc, x) \ MADD prime2, acc, x, acc \ ROR $64-31, acc \ - MUL prime1, acc \ + MUL prime1, acc -// x = round(0, x). +// round0 performs the operation x = round(0, x). #define round0(x) \ MUL prime2, x \ ROR $64-31, x \ - MUL prime1, x \ - -#define mergeRound(x) \ - round0(x) \ - EOR x, h \ - MADD h, prime4, prime1, h \ - -// Update v[1-4] with 32-byte blocks. Assumes len >= 32. -#define blocksLoop() \ - LSR $5, len, nblocks \ - PCALIGN $16 \ - loop: \ - LDP.P 32(p), (x1, x2) \ - round(v1, x1) \ - LDP -16(p), (x3, x4) \ - round(v2, x2) \ - SUB $1, nblocks \ - round(v3, x3) \ - round(v4, x4) \ - CBNZ nblocks, loop \ - -// The primes are repeated here to ensure that they're stored -// in a contiguous array, so we can load them with LDP. -DATA primes<> +0(SB)/8, $11400714785074694791 -DATA primes<> +8(SB)/8, $14029467366897019727 -DATA primes<>+16(SB)/8, $1609587929392839161 -DATA primes<>+24(SB)/8, $9650029242287828579 -DATA primes<>+32(SB)/8, $2870177450012600261 -GLOBL primes<>(SB), NOPTR+RODATA, $40 + MUL prime1, x + +#define mergeRound(acc, x) \ + round0(x) \ + EOR x, acc \ + MADD acc, prime4, prime1, acc + +// blockLoop processes as many 32-byte blocks as possible, +// updating v1, v2, v3, and v4. It assumes that n >= 32. 
+#define blockLoop() \ + LSR $5, n, nblocks \ + PCALIGN $16 \ + loop: \ + LDP.P 16(p), (x1, x2) \ + LDP.P 16(p), (x3, x4) \ + round(v1, x1) \ + round(v2, x2) \ + round(v3, x3) \ + round(v4, x4) \ + SUB $1, nblocks \ + CBNZ nblocks, loop // func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32 - LDP b_base+0(FP), (p, len) +TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 + LDP b_base+0(FP), (p, n) - LDP primes<> +0(SB), (prime1, prime2) - LDP primes<>+16(SB), (prime3, prime4) - MOVD primes<>+32(SB), prime5 + LDP ·primes+0(SB), (prime1, prime2) + LDP ·primes+16(SB), (prime3, prime4) + MOVD ·primes+32(SB), prime5 - CMP $32, len - CSEL LO, prime5, ZR, h // if len < 32 { h = prime5 } else { h = 0 } - BLO afterLoop + CMP $32, n + CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } + BLT afterLoop ADD prime1, prime2, v1 MOVD prime2, v2 MOVD $0, v3 NEG prime1, v4 - blocksLoop() + blockLoop() ROR $64-1, v1, x1 ROR $64-7, v2, x2 @@ -88,71 +84,75 @@ TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32 ADD x3, x4 ADD x2, x4, h - mergeRound(v1) - mergeRound(v2) - mergeRound(v3) - mergeRound(v4) + mergeRound(h, v1) + mergeRound(h, v2) + mergeRound(h, v3) + mergeRound(h, v4) afterLoop: - ADD len, h + ADD n, h - TBZ $4, len, try8 + TBZ $4, n, try8 LDP.P 16(p), (x1, x2) round0(x1) + + // NOTE: here and below, sequencing the EOR after the ROR (using a + // rotated register) is worth a small but measurable speedup for small + // inputs. ROR $64-27, h EOR x1 @> 64-27, h, h MADD h, prime4, prime1, h round0(x2) ROR $64-27, h - EOR x2 @> 64-27, h + EOR x2 @> 64-27, h, h MADD h, prime4, prime1, h try8: - TBZ $3, len, try4 + TBZ $3, n, try4 MOVD.P 8(p), x1 round0(x1) ROR $64-27, h - EOR x1 @> 64-27, h + EOR x1 @> 64-27, h, h MADD h, prime4, prime1, h try4: - TBZ $2, len, try2 + TBZ $2, n, try2 MOVWU.P 4(p), x2 MUL prime1, x2 ROR $64-23, h - EOR x2 @> 64-23, h + EOR x2 @> 64-23, h, h MADD h, prime3, prime2, h try2: - TBZ $1, len, try1 + TBZ $1, n, try1 MOVHU.P 2(p), x3 AND $255, x3, x1 LSR $8, x3, x2 MUL prime5, x1 ROR $64-11, h - EOR x1 @> 64-11, h + EOR x1 @> 64-11, h, h MUL prime1, h MUL prime5, x2 ROR $64-11, h - EOR x2 @> 64-11, h + EOR x2 @> 64-11, h, h MUL prime1, h try1: - TBZ $0, len, end + TBZ $0, n, finalize MOVBU (p), x4 MUL prime5, x4 ROR $64-11, h - EOR x4 @> 64-11, h + EOR x4 @> 64-11, h, h MUL prime1, h -end: +finalize: EOR h >> 33, h MUL prime2, h EOR h >> 29, h @@ -163,24 +163,22 @@ end: RET // func writeBlocks(d *Digest, b []byte) int -// -// Assumes len(b) >= 32. -TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40 - LDP primes<>(SB), (prime1, prime2) +TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 + LDP ·primes+0(SB), (prime1, prime2) // Load state. Assume v[1-4] are stored contiguously. MOVD d+0(FP), digest LDP 0(digest), (v1, v2) LDP 16(digest), (v3, v4) - LDP b_base+8(FP), (p, len) + LDP b_base+8(FP), (p, n) - blocksLoop() + blockLoop() // Store updated state. 
STP (v1, v2), 0(digest) STP (v3, v4), 16(digest) - BIC $31, len - MOVD len, ret+32(FP) + BIC $31, n + MOVD n, ret+32(FP) RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go index 1a1fac9c26130..d4221edf4fd61 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go @@ -13,4 +13,4 @@ package xxhash func Sum64(b []byte) uint64 //go:noescape -func writeBlocks(d *Digest, b []byte) int +func writeBlocks(s *Digest, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go index 209cb4a999c3a..0be16cefc7f4a 100644 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go +++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go @@ -15,10 +15,10 @@ func Sum64(b []byte) uint64 { var h uint64 if n >= 32 { - v1 := prime1v + prime2 + v1 := primes[0] + prime2 v2 := prime2 v3 := uint64(0) - v4 := -prime1v + v4 := -primes[0] for len(b) >= 32 { v1 = round(v1, u64(b[0:8:len(b)])) v2 = round(v2, u64(b[8:16:len(b)])) @@ -37,19 +37,18 @@ func Sum64(b []byte) uint64 { h += uint64(n) - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) + for ; len(b) >= 8; b = b[8:] { + k1 := round(0, u64(b[:8])) h ^= k1 h = rol27(h)*prime1 + prime4 } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + if len(b) >= 4 { + h ^= uint64(u32(b[:4])) * prime1 h = rol23(h)*prime2 + prime3 - i += 4 + b = b[4:] } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 + for ; len(b) > 0; b = b[1:] { + h ^= uint64(b[0]) * prime5 h = rol11(h) * prime1 } diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s index 52e5703c26c46..b94993a072789 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -320,10 +320,6 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET - // Return with not enough output space error - MOVQ $0x00000005, ret+24(FP) - RET - // func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: CMOV TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 @@ -617,10 +613,6 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET - // Return with not enough output space error - MOVQ $0x00000005, ret+24(FP) - RET - // func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: BMI, BMI2, CMOV TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 @@ -897,10 +889,6 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET - // Return with not enough output space error - MOVQ $0x00000005, ret+24(FP) - RET - // func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int // Requires: BMI, BMI2, CMOV TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 @@ -1152,10 +1140,6 @@ error_not_enough_literals: MOVQ $0x00000004, ret+24(FP) RET - // Return with not enough output space error - MOVQ $0x00000005, ret+24(FP) - RET - // func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool // Requires: SSE TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 @@ -1389,8 +1373,7 @@ loop_finished: MOVQ ctx+0(FP), AX MOVQ DX, 24(AX) MOVQ DI, 104(AX) - MOVQ 80(AX), CX 
- SUBQ CX, SI + SUBQ 80(AX), SI MOVQ SI, 112(AX) RET @@ -1402,8 +1385,7 @@ error_match_off_too_big: MOVQ ctx+0(FP), AX MOVQ DX, 24(AX) MOVQ DI, 104(AX) - MOVQ 80(AX), CX - SUBQ CX, SI + SUBQ 80(AX), SI MOVQ SI, 112(AX) RET @@ -1747,8 +1729,7 @@ loop_finished: MOVQ ctx+0(FP), AX MOVQ DX, 24(AX) MOVQ DI, 104(AX) - MOVQ 80(AX), CX - SUBQ CX, SI + SUBQ 80(AX), SI MOVQ SI, 112(AX) RET @@ -1760,8 +1741,7 @@ error_match_off_too_big: MOVQ ctx+0(FP), AX MOVQ DX, 24(AX) MOVQ DI, 104(AX) - MOVQ 80(AX), CX - SUBQ CX, SI + SUBQ 80(AX), SI MOVQ SI, 112(AX) RET diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 3eb3f1c82661a..5ffa82f5ac1bb 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -36,9 +36,6 @@ const forcePreDef = false // zstdMinMatch is the minimum zstd match length. const zstdMinMatch = 3 -// Reset the buffer offset when reaching this. -const bufferReset = math.MaxInt32 - MaxWindowSize - // fcsUnknown is used for unknown frame content size. const fcsUnknown = math.MaxUint64 @@ -75,7 +72,6 @@ var ( ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") // ErrUnknownDictionary is returned if the dictionary ID is unknown. - // For the time being dictionaries are not supported. ErrUnknownDictionary = errors.New("unknown dictionary") // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. @@ -110,26 +106,25 @@ func printf(format string, a ...interface{}) { } } -// matchLen returns the maximum length. +// matchLen returns the maximum common prefix length of a and b. // a must be the shortest of the two. -// The function also returns whether all bytes matched. -func matchLen(a, b []byte) int { - b = b[:len(a)] - for i := 0; i < len(a)-7; i += 8 { - if diff := load64(a, i) ^ load64(b, i); diff != 0 { - return i + (bits.TrailingZeros64(diff) >> 3) +func matchLen(a, b []byte) (n int) { + for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { + diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + if diff != 0 { + return n + bits.TrailingZeros64(diff)>>3 } + n += 8 } - checked := (len(a) >> 3) << 3 - a = a[checked:] - b = b[checked:] for i := range a { if a[i] != b[i] { - return i + checked + break } + n++ } - return len(a) + checked + return n + } func load3232(b []byte, i int32) uint32 { @@ -140,10 +135,6 @@ func load6432(b []byte, i int32) uint64 { return binary.LittleEndian.Uint64(b[i:]) } -func load64(b []byte, i int) uint64 { - return binary.LittleEndian.Uint64(b[i:]) -} - type byter interface { Bytes() []byte Len() int diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go index 581cf7cdfadf3..6f9e6fd3abffe 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go @@ -59,4 +59,13 @@ const ( // AnnotationBaseImageName is the annotation key for the image reference of the image's base image. AnnotationBaseImageName = "org.opencontainers.image.base.name" + + // AnnotationArtifactCreated is the annotation key for the date and time on which the artifact was built, conforming to RFC 3339. + AnnotationArtifactCreated = "org.opencontainers.artifact.created" + + // AnnotationArtifactDescription is the annotation key for the human readable description for the artifact. 
+ AnnotationArtifactDescription = "org.opencontainers.artifact.description" + + // AnnotationReferrersFiltersApplied is the annotation key for the comma separated list of filters applied by the registry in the referrers listing. + AnnotationReferrersFiltersApplied = "org.opencontainers.referrers.filtersApplied" ) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go new file mode 100644 index 0000000000000..03d76ce437ae0 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/artifact.go @@ -0,0 +1,34 @@ +// Copyright 2022 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +// Artifact describes an artifact manifest. +// This structure provides `application/vnd.oci.artifact.manifest.v1+json` mediatype when marshalled to JSON. +type Artifact struct { + // MediaType is the media type of the object this schema refers to. + MediaType string `json:"mediaType"` + + // ArtifactType is the IANA media type of the artifact this schema refers to. + ArtifactType string `json:"artifactType"` + + // Blobs is a collection of blobs referenced by this manifest. + Blobs []Descriptor `json:"blobs,omitempty"` + + // Subject (reference) is an optional link from the artifact to another manifest forming an association between the artifact and the other manifest. + Subject *Descriptor `json:"subject,omitempty"` + + // Annotations contains arbitrary metadata for the artifact manifest. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go index ffff4b6d18637..e6aa113f074e2 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go @@ -48,6 +48,15 @@ type ImageConfig struct { // StopSignal contains the system call signal that will be sent to the container to exit. StopSignal string `json:"StopSignal,omitempty"` + + // ArgsEscaped `[Deprecated]` - This field is present only for legacy + // compatibility with Docker and should not be used by new image builders. + // It is used by Docker for Windows images to indicate that the `Entrypoint` + // or `Cmd` or both, contains only a single element array, that is a + // pre-escaped, and combined into a single string `CommandLine`. If `true` + // the value in `Entrypoint` or `Cmd` should be used as-is to avoid double + // escaping. 
+ ArgsEscaped bool `json:"ArgsEscaped,omitempty"` } // RootFS describes a layer content addresses diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go index 94f19be62850b..9654aa5af68a9 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go @@ -1,4 +1,4 @@ -// Copyright 2016 The Linux Foundation +// Copyright 2016-2022 The Linux Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -44,6 +44,9 @@ type Descriptor struct { // // This should only be used when referring to a manifest. Platform *Platform `json:"platform,omitempty"` + + // ArtifactType is the IANA media type of this artifact. + ArtifactType string `json:"artifactType,omitempty"` } // Platform describes the platform which the image in the manifest runs on. diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go index 8212d520c06f8..730a09359b1cb 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go @@ -1,4 +1,4 @@ -// Copyright 2016 The Linux Foundation +// Copyright 2016-2022 The Linux Foundation // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -30,6 +30,9 @@ type Manifest struct { // Layers is an indexed list of layers referenced by the manifest. Layers []Descriptor `json:"layers"` + // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. + Subject *Descriptor `json:"subject,omitempty"` + // Annotations contains arbitrary metadata for the image manifest. Annotations map[string]string `json:"annotations,omitempty"` } diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go index 4f35ac134fefb..935b481e3ed56 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go @@ -54,4 +54,7 @@ const ( // MediaTypeImageConfig specifies the media type for the image configuration. MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" + + // MediaTypeArtifactManifest specifies the media type for a content descriptor. + MediaTypeArtifactManifest = "application/vnd.oci.artifact.manifest.v1+json" ) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go index 31f99cf645c43..1afd590fe0b55 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -20,9 +20,9 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 1 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 0 + VersionMinor = 1 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 2 + VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. 
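// Illustrative sketch (hypothetical usage, not part of the vendored code): the
// Artifact type, Descriptor.ArtifactType, Manifest.Subject, and
// MediaTypeArtifactManifest added above combine to describe an artifact that
// references a subject image manifest. The media types and digests below are
// placeholders.

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/opencontainers/go-digest"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	artifact := v1.Artifact{
		MediaType:    v1.MediaTypeArtifactManifest,
		ArtifactType: "application/vnd.example.sbom.v1+json", // hypothetical artifact type
		Blobs: []v1.Descriptor{{
			MediaType: "application/vnd.example.sbom.v1+json",
			Digest:    digest.Digest("sha256:<blob-digest>"), // placeholder
			Size:      1234,
		}},
		Subject: &v1.Descriptor{
			MediaType: v1.MediaTypeImageManifest,
			Digest:    digest.Digest("sha256:<manifest-digest>"), // placeholder
			Size:      5678,
		},
		Annotations: map[string]string{
			v1.AnnotationArtifactCreated: "2022-01-01T00:00:00Z",
		},
	}
	b, err := json.MarshalIndent(artifact, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))
}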
VersionDev = "-dev" diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go index 6a7a91e55969f..5b4f691c70ba3 100644 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go @@ -12,10 +12,12 @@ type Spec struct { Root *Root `json:"root,omitempty"` // Hostname configures the container's hostname. Hostname string `json:"hostname,omitempty"` + // Domainname configures the container's domainname. + Domainname string `json:"domainname,omitempty"` // Mounts configures additional mounts (on top of Root). Mounts []Mount `json:"mounts,omitempty"` // Hooks configures callbacks for container lifecycle events. - Hooks *Hooks `json:"hooks,omitempty" platform:"linux,solaris"` + Hooks *Hooks `json:"hooks,omitempty" platform:"linux,solaris,zos"` // Annotations contains arbitrary metadata for the container. Annotations map[string]string `json:"annotations,omitempty"` @@ -27,6 +29,8 @@ type Spec struct { Windows *Windows `json:"windows,omitempty" platform:"windows"` // VM specifies configuration for virtual-machine-based containers. VM *VM `json:"vm,omitempty" platform:"vm"` + // ZOS is platform-specific configuration for z/OS based containers. + ZOS *ZOS `json:"zos,omitempty" platform:"zos"` } // Process contains information to start a specific application inside the container. @@ -49,7 +53,7 @@ type Process struct { // Capabilities are Linux capabilities that are kept for the process. Capabilities *LinuxCapabilities `json:"capabilities,omitempty" platform:"linux"` // Rlimits specifies rlimit options to apply to the process. - Rlimits []POSIXRlimit `json:"rlimits,omitempty" platform:"linux,solaris"` + Rlimits []POSIXRlimit `json:"rlimits,omitempty" platform:"linux,solaris,zos"` // NoNewPrivileges controls whether additional privileges could be gained by processes in the container. NoNewPrivileges bool `json:"noNewPrivileges,omitempty" platform:"linux"` // ApparmorProfile specifies the apparmor profile for the container. @@ -86,11 +90,11 @@ type Box struct { // User specifies specific user (and group) information for the container process. type User struct { // UID is the user id. - UID uint32 `json:"uid" platform:"linux,solaris"` + UID uint32 `json:"uid" platform:"linux,solaris,zos"` // GID is the group id. - GID uint32 `json:"gid" platform:"linux,solaris"` + GID uint32 `json:"gid" platform:"linux,solaris,zos"` // Umask is the umask for the init process. - Umask *uint32 `json:"umask,omitempty" platform:"linux,solaris"` + Umask *uint32 `json:"umask,omitempty" platform:"linux,solaris,zos"` // AdditionalGids are additional group ids set for the container's process. AdditionalGids []uint32 `json:"additionalGids,omitempty" platform:"linux,solaris"` // Username is the user name. @@ -110,11 +114,16 @@ type Mount struct { // Destination is the absolute path where the mount will be placed in the container. Destination string `json:"destination"` // Type specifies the mount kind. - Type string `json:"type,omitempty" platform:"linux,solaris"` + Type string `json:"type,omitempty" platform:"linux,solaris,zos"` // Source specifies the source path of the mount. Source string `json:"source,omitempty"` // Options are fstab style mount options. Options []string `json:"options,omitempty"` + + // UID/GID mappings used for changing file owners w/o calling chown, fs should support it. + // Every mount point could have its own mapping. 
+ UIDMappings []LinuxIDMapping `json:"uidMappings,omitempty" platform:"linux"` + GIDMappings []LinuxIDMapping `json:"gidMappings,omitempty" platform:"linux"` } // Hook specifies a command that is run at a particular event in the lifecycle of a container @@ -178,7 +187,7 @@ type Linux struct { // MountLabel specifies the selinux context for the mounts in the container. MountLabel string `json:"mountLabel,omitempty"` // IntelRdt contains Intel Resource Director Technology (RDT) information for - // handling resource constraints (e.g., L3 cache, memory bandwidth) for the container + // handling resource constraints and monitoring metrics (e.g., L3 cache, memory bandwidth) for the container IntelRdt *LinuxIntelRdt `json:"intelRdt,omitempty"` // Personality contains configuration for the Linux personality syscall Personality *LinuxPersonality `json:"personality,omitempty"` @@ -250,8 +259,8 @@ type LinuxInterfacePriority struct { Priority uint32 `json:"priority"` } -// linuxBlockIODevice holds major:minor format supported in blkio cgroup -type linuxBlockIODevice struct { +// LinuxBlockIODevice holds major:minor format supported in blkio cgroup +type LinuxBlockIODevice struct { // Major is the device's major number. Major int64 `json:"major"` // Minor is the device's minor number. @@ -260,7 +269,7 @@ type linuxBlockIODevice struct { // LinuxWeightDevice struct holds a `major:minor weight` pair for weightDevice type LinuxWeightDevice struct { - linuxBlockIODevice + LinuxBlockIODevice // Weight is the bandwidth rate for the device. Weight *uint16 `json:"weight,omitempty"` // LeafWeight is the bandwidth rate for the device while competing with the cgroup's child cgroups, CFQ scheduler only @@ -269,7 +278,7 @@ type LinuxWeightDevice struct { // LinuxThrottleDevice struct holds a `major:minor rate_per_second` pair type LinuxThrottleDevice struct { - linuxBlockIODevice + LinuxBlockIODevice // Rate is the IO rate limit per cgroup per device Rate uint64 `json:"rate"` } @@ -310,6 +319,10 @@ type LinuxMemory struct { DisableOOMKiller *bool `json:"disableOOMKiller,omitempty"` // Enables hierarchical memory accounting UseHierarchy *bool `json:"useHierarchy,omitempty"` + // CheckBeforeUpdate enables checking if a new memory limit is lower + // than the current usage during update, and if so, rejecting the new + // limit. + CheckBeforeUpdate *bool `json:"checkBeforeUpdate,omitempty"` } // LinuxCPU for Linux cgroup 'cpu' resource management @@ -318,6 +331,9 @@ type LinuxCPU struct { Shares *uint64 `json:"shares,omitempty"` // CPU hardcap limit (in usecs). Allowed cpu time in a given period. Quota *int64 `json:"quota,omitempty"` + // CPU hardcap burst limit (in usecs). Allowed accumulated cpu time additionally for burst in a + // given period. + Burst *uint64 `json:"burst,omitempty"` // CPU period to be used for hardcapping (in usecs). Period *uint64 `json:"period,omitempty"` // How much time realtime scheduling may use (in usecs). @@ -328,6 +344,8 @@ type LinuxCPU struct { Cpus string `json:"cpus,omitempty"` // List of memory nodes in the cpuset. Default is to use any available memory node. Mems string `json:"mems,omitempty"` + // cgroups are configured with minimum weight, 0: default behavior, 1: SCHED_IDLE. + Idle *int64 `json:"idle,omitempty"` } // LinuxPids for Linux cgroup 'pids' resource management (Linux 4.3) @@ -522,11 +540,21 @@ type WindowsMemoryResources struct { // WindowsCPUResources contains CPU resource management settings. 
type WindowsCPUResources struct { - // Number of CPUs available to the container. + // Count is the number of CPUs available to the container. It represents the + // fraction of the configured processor `count` in a container in relation + // to the processors available in the host. The fraction ultimately + // determines the portion of processor cycles that the threads in a + // container can use during each scheduling interval, as the number of + // cycles per 10,000 cycles. Count *uint64 `json:"count,omitempty"` - // CPU shares (relative weight to other containers with cpu shares). + // Shares limits the share of processor time given to the container relative + // to other workloads on the processor. The processor `shares` (`weight` at + // the platform level) is a value between 0 and 10000. Shares *uint16 `json:"shares,omitempty"` - // Specifies the portion of processor cycles that this container can use as a percentage times 100. + // Maximum determines the portion of processor cycles that the threads in a + // container can use during each scheduling interval, as the number of + // cycles per 10,000 cycles. Set processor `maximum` to a percentage times + // 100. Maximum *uint16 `json:"maximum,omitempty"` } @@ -613,6 +641,23 @@ type Arch string // LinuxSeccompFlag is a flag to pass to seccomp(2). type LinuxSeccompFlag string +const ( + // LinuxSeccompFlagLog is a seccomp flag to request all returned + // actions except SECCOMP_RET_ALLOW to be logged. An administrator may + // override this filter flag by preventing specific actions from being + // logged via the /proc/sys/kernel/seccomp/actions_logged file. (since + // Linux 4.14) + LinuxSeccompFlagLog LinuxSeccompFlag = "SECCOMP_FILTER_FLAG_LOG" + + // LinuxSeccompFlagSpecAllow can be used to disable Speculative Store + // Bypass mitigation. (since Linux 4.17) + LinuxSeccompFlagSpecAllow LinuxSeccompFlag = "SECCOMP_FILTER_FLAG_SPEC_ALLOW" + + // LinuxSeccompFlagWaitKillableRecv can be used to switch to the wait + // killable semantics. (since Linux 5.19) + LinuxSeccompFlagWaitKillableRecv LinuxSeccompFlag = "SECCOMP_FILTER_FLAG_WAIT_KILLABLE_RECV" +) + // Additional architectures permitted to be used for system calls // By default only the native architecture of the kernel is permitted const ( @@ -683,8 +728,9 @@ type LinuxSyscall struct { Args []LinuxSeccompArg `json:"args,omitempty"` } -// LinuxIntelRdt has container runtime resource constraints for Intel RDT -// CAT and MBA features which introduced in Linux 4.10 and 4.12 kernel +// LinuxIntelRdt has container runtime resource constraints for Intel RDT CAT and MBA +// features and flags enabling Intel RDT CMT and MBM features. +// Intel RDT features are available in Linux 4.14 and newer kernel versions. type LinuxIntelRdt struct { // The identity for RDT Class of Service ClosID string `json:"closID,omitempty"` @@ -697,4 +743,36 @@ type LinuxIntelRdt struct { // The unit of memory bandwidth is specified in "percentages" by // default, and in "MBps" if MBA Software Controller is enabled. MemBwSchema string `json:"memBwSchema,omitempty"` + + // EnableCMT is the flag to indicate if the Intel RDT CMT is enabled. CMT (Cache Monitoring Technology) supports monitoring of + // the last-level cache (LLC) occupancy for the container. + EnableCMT bool `json:"enableCMT,omitempty"` + + // EnableMBM is the flag to indicate if the Intel RDT MBM is enabled. MBM (Memory Bandwidth Monitoring) supports monitoring of + // total and local memory bandwidth for the container. 
+ EnableMBM bool `json:"enableMBM,omitempty"` +} + +// ZOS contains platform-specific configuration for z/OS based containers. +type ZOS struct { + // Devices are a list of device nodes that are created for the container + Devices []ZOSDevice `json:"devices,omitempty"` +} + +// ZOSDevice represents the mknod information for a z/OS special device file +type ZOSDevice struct { + // Path to the device. + Path string `json:"path"` + // Device type, block, char, etc. + Type string `json:"type"` + // Major is the device's major number. + Major int64 `json:"major"` + // Minor is the device's minor number. + Minor int64 `json:"minor"` + // FileMode permission bits for the device. + FileMode *os.FileMode `json:"fileMode,omitempty"` + // UID of the device. + UID *uint32 `json:"uid,omitempty"` + // Gid of the device. + GID *uint32 `json:"gid,omitempty"` } diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go index 596af0c2fdb99..8ae4227b9bafc 100644 --- a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go @@ -6,12 +6,12 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 1 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 0 + VersionMinor = 1 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 2 + VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "-dev" + VersionDev = "-rc.1" ) // Version is the specification version that the package types support. diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go index fea096c180f61..07e0f77dc278a 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label.go @@ -78,6 +78,9 @@ func ReleaseLabel(label string) error { // Deprecated: use selinux.DupSecOpt var DupSecOpt = selinux.DupSecOpt +// FormatMountLabel returns a string to be used by the mount command. Using +// the SELinux `context` mount option. Changing labels of files on mount +// points with this option can never be changed. // FormatMountLabel returns a string to be used by the mount command. // The format of this string will be used to alter the labeling of the mountpoint. // The string returned is suitable to be used as the options field of the mount command. @@ -85,12 +88,27 @@ var DupSecOpt = selinux.DupSecOpt // the first parameter. Second parameter is the label that you wish to apply // to all content in the mount point. func FormatMountLabel(src, mountLabel string) string { + return FormatMountLabelByType(src, mountLabel, "context") +} + +// FormatMountLabelByType returns a string to be used by the mount command. +// Allow caller to specify the mount options. For example using the SELinux +// `fscontext` mount option would allow certain container processes to change +// labels of files created on the mount points, where as `context` option does +// not. +// FormatMountLabelByType returns a string to be used by the mount command. +// The format of this string will be used to alter the labeling of the mountpoint. +// The string returned is suitable to be used as the options field of the mount command. +// If you need to have additional mount point options, you can pass them in as +// the first parameter. 
Second parameter is the label that you wish to apply +// to all content in the mount point. +func FormatMountLabelByType(src, mountLabel, contextType string) string { if mountLabel != "" { switch src { case "": - src = fmt.Sprintf("context=%q", mountLabel) + src = fmt.Sprintf("%s=%q", contextType, mountLabel) default: - src = fmt.Sprintf("%s,context=%q", src, mountLabel) + src = fmt.Sprintf("%s,%s=%q", src, contextType, mountLabel) } } return src diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go b/vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go deleted file mode 100644 index 8bff29355798c..0000000000000 --- a/vendor/github.com/opencontainers/selinux/go-selinux/rchcon.go +++ /dev/null @@ -1,34 +0,0 @@ -//go:build linux && go1.16 -// +build linux,go1.16 - -package selinux - -import ( - "errors" - "io/fs" - "os" - - "github.com/opencontainers/selinux/pkg/pwalkdir" -) - -func rchcon(fpath, label string) error { - fastMode := false - // If the current label matches the new label, assume - // other labels are correct. - if cLabel, err := lFileLabel(fpath); err == nil && cLabel == label { - fastMode = true - } - return pwalkdir.Walk(fpath, func(p string, _ fs.DirEntry, _ error) error { - if fastMode { - if cLabel, err := lFileLabel(fpath); err == nil && cLabel == label { - return nil - } - } - e := lSetFileLabel(p, label) - // Walk a file tree can race with removal, so ignore ENOENT. - if errors.Is(e, os.ErrNotExist) { - return nil - } - return e - }) -} diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go b/vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go deleted file mode 100644 index 303cb1890ad42..0000000000000 --- a/vendor/github.com/opencontainers/selinux/go-selinux/rchcon_go115.go +++ /dev/null @@ -1,22 +0,0 @@ -//go:build linux && !go1.16 -// +build linux,!go1.16 - -package selinux - -import ( - "errors" - "os" - - "github.com/opencontainers/selinux/pkg/pwalk" -) - -func rchcon(fpath, label string) error { - return pwalk.Walk(fpath, func(p string, _ os.FileInfo, _ error) error { - e := lSetFileLabel(p, label) - // Walk a file tree can race with removal, so ignore ENOENT. - if errors.Is(e, os.ErrNotExist) { - return nil - } - return e - }) -} diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go index 5a59d151f6759..af058b84b1325 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go @@ -23,8 +23,13 @@ var ( // ErrEmptyPath is returned when an empty path has been specified. ErrEmptyPath = errors.New("empty path") + // ErrInvalidLabel is returned when an invalid label is specified. + ErrInvalidLabel = errors.New("invalid Label") + // InvalidLabel is returned when an invalid label is specified. - InvalidLabel = errors.New("Invalid Label") + // + // Deprecated: use [ErrInvalidLabel]. + InvalidLabel = ErrInvalidLabel // ErrIncomparable is returned two levels are not comparable ErrIncomparable = errors.New("incomparable levels") @@ -144,7 +149,7 @@ func CalculateGlbLub(sourceRange, targetRange string) (string, error) { // of the program is finished to guarantee another goroutine does not migrate to the current // thread before execution is complete. func SetExecLabel(label string) error { - return setExecLabel(label) + return writeCon(attrPath("exec"), label) } // SetTaskLabel sets the SELinux label for the current thread, or an error. 
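// Illustrative sketch (hypothetical usage, not part of the vendored code): the
// FormatMountLabelByType helper added in label.go above appends a chosen
// context-type mount option to an existing option string, while the original
// FormatMountLabel keeps the "context" behaviour:

package main

import (
	"fmt"

	"github.com/opencontainers/selinux/go-selinux/label"
)

func main() {
	mountLabel := "system_u:object_r:container_file_t:s0:c1,c2"

	// "context": labels of files on the mount cannot be changed afterwards.
	fmt.Println(label.FormatMountLabel("ro,noexec", mountLabel))
	// Output: ro,noexec,context="system_u:object_r:container_file_t:s0:c1,c2"

	// "fscontext": allows labels of files created on the mount to be changed.
	fmt.Println(label.FormatMountLabelByType("ro,noexec", mountLabel, "fscontext"))
	// Output: ro,noexec,fscontext="system_u:object_r:container_file_t:s0:c1,c2"
}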
@@ -152,21 +157,21 @@ func SetExecLabel(label string) error { // be wrapped in runtime.LockOSThread()/runtime.UnlockOSThread() to guarantee // the current thread does not run in a new mislabeled thread. func SetTaskLabel(label string) error { - return setTaskLabel(label) + return writeCon(attrPath("current"), label) } // SetSocketLabel takes a process label and tells the kernel to assign the // label to the next socket that gets created. Calls to SetSocketLabel // should be wrapped in runtime.LockOSThread()/runtime.UnlockOSThread() until -// the the socket is created to guarantee another goroutine does not migrate +// the socket is created to guarantee another goroutine does not migrate // to the current thread before execution is complete. func SetSocketLabel(label string) error { - return setSocketLabel(label) + return writeCon(attrPath("sockcreate"), label) } // SocketLabel retrieves the current socket label setting func SocketLabel() (string, error) { - return socketLabel() + return readCon(attrPath("sockcreate")) } // PeerLabel retrieves the label of the client on the other side of a socket @@ -185,7 +190,7 @@ func SetKeyLabel(label string) error { // KeyLabel retrieves the current kernel keyring label setting func KeyLabel() (string, error) { - return keyLabel() + return readCon("/proc/self/attr/keycreate") } // Get returns the Context as a string @@ -208,6 +213,11 @@ func ReserveLabel(label string) { reserveLabel(label) } +// MLSEnabled checks if MLS is enabled. +func MLSEnabled() bool { + return isMLSEnabled() +} + // EnforceMode returns the current SELinux mode Enforcing, Permissive, Disabled func EnforceMode() int { return enforceMode() @@ -220,7 +230,7 @@ func SetEnforceMode(mode int) error { } // DefaultEnforceMode returns the systems default SELinux mode Enforcing, -// Permissive or Disabled. Note this is is just the default at boot time. +// Permissive or Disabled. Note this is just the default at boot time. // EnforceMode tells you the systems current mode. func DefaultEnforceMode() int { return defaultEnforceMode() @@ -266,7 +276,7 @@ func CopyLevel(src, dest string) (string, error) { return copyLevel(src, dest) } -// Chcon changes the fpath file object to the SELinux label label. +// Chcon changes the fpath file object to the SELinux label. // If fpath is a directory and recurse is true, then Chcon walks the // directory tree setting the label. // @@ -284,7 +294,7 @@ func DupSecOpt(src string) ([]string, error) { // DisableSecOpt returns a security opt that can be used to disable SELinux // labeling support for future container processes. 
func DisableSecOpt() []string { - return disableSecOpt() + return []string{"disable"} } // GetDefaultContextWithLevel gets a single context for the specified SELinux user diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go index 4582cc9e0a2e3..f1e95977d3b04 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go @@ -8,16 +8,16 @@ import ( "errors" "fmt" "io" - "io/ioutil" + "io/fs" "math/big" "os" "os/user" - "path" "path/filepath" "strconv" "strings" "sync" + "github.com/opencontainers/selinux/pkg/pwalkdir" "golang.org/x/sys/unix" ) @@ -35,17 +35,17 @@ const ( ) type selinuxState struct { + mcsList map[string]bool + selinuxfs string + selinuxfsOnce sync.Once enabledSet bool enabled bool - selinuxfsOnce sync.Once - selinuxfs string - mcsList map[string]bool sync.Mutex } type level struct { - sens uint cats *big.Int + sens uint } type mlsRange struct { @@ -54,10 +54,10 @@ type mlsRange struct { } type defaultSECtx struct { - user, level, scon string - userRdr, defaultRdr io.Reader - - verifier func(string) error + userRdr io.Reader + verifier func(string) error + defaultRdr io.Reader + user, level, scon string } type levelItem byte @@ -155,7 +155,7 @@ func findSELinuxfs() string { } // check if selinuxfs is available before going the slow path - fs, err := ioutil.ReadFile("/proc/filesystems") + fs, err := os.ReadFile("/proc/filesystems") if err != nil { return "" } @@ -292,7 +292,7 @@ func readCon(fpath string) (string, error) { } func readConFd(in *os.File) (string, error) { - data, err := ioutil.ReadAll(in) + data, err := io.ReadAll(in) if err != nil { return "", err } @@ -305,7 +305,7 @@ func classIndex(class string) (int, error) { permpath := fmt.Sprintf("class/%s/index", class) indexpath := filepath.Join(getSelinuxMountPoint(), permpath) - indexB, err := ioutil.ReadFile(indexpath) + indexB, err := os.ReadFile(indexpath) if err != nil { return -1, err } @@ -391,21 +391,19 @@ func lFileLabel(fpath string) (string, error) { return string(label), nil } -// setFSCreateLabel tells kernel the label to create all file system objects -// created by this task. Setting label="" to return to default. func setFSCreateLabel(label string) error { - return writeAttr("fscreate", label) + return writeCon(attrPath("fscreate"), label) } // fsCreateLabel returns the default label the kernel which the kernel is using // for file system objects created by this task. "" indicates default. func fsCreateLabel() (string, error) { - return readAttr("fscreate") + return readCon(attrPath("fscreate")) } // currentLabel returns the SELinux label of the current process thread, or an error. func currentLabel() (string, error) { - return readAttr("current") + return readCon(attrPath("current")) } // pidLabel returns the SELinux label of the given pid, or an error. @@ -416,7 +414,7 @@ func pidLabel(pid int) (string, error) { // ExecLabel returns the SELinux label that the kernel will use for any programs // that are executed by the current process thread, or an error. 
func execLabel() (string, error) { - return readAttr("exec") + return readCon(attrPath("exec")) } func writeCon(fpath, val string) error { @@ -462,18 +460,10 @@ func attrPath(attr string) string { }) if haveThreadSelf { - return path.Join(threadSelfPrefix, attr) + return filepath.Join(threadSelfPrefix, attr) } - return path.Join("/proc/self/task/", strconv.Itoa(unix.Gettid()), "/attr/", attr) -} - -func readAttr(attr string) (string, error) { - return readCon(attrPath(attr)) -} - -func writeAttr(attr, val string) error { - return writeCon(attrPath(attr), val) + return filepath.Join("/proc/self/task", strconv.Itoa(unix.Gettid()), "attr", attr) } // canonicalizeContext takes a context string and writes it to the kernel @@ -560,30 +550,30 @@ func (l *level) parseLevel(levelStr string) error { // rangeStrToMLSRange marshals a string representation of a range. func rangeStrToMLSRange(rangeStr string) (*mlsRange, error) { - mlsRange := &mlsRange{} - levelSlice := strings.SplitN(rangeStr, "-", 2) + r := &mlsRange{} + l := strings.SplitN(rangeStr, "-", 2) - switch len(levelSlice) { + switch len(l) { // rangeStr that has a low and a high level, e.g. s4:c0.c1023-s6:c0.c1023 case 2: - mlsRange.high = &level{} - if err := mlsRange.high.parseLevel(levelSlice[1]); err != nil { - return nil, fmt.Errorf("failed to parse high level %q: %w", levelSlice[1], err) + r.high = &level{} + if err := r.high.parseLevel(l[1]); err != nil { + return nil, fmt.Errorf("failed to parse high level %q: %w", l[1], err) } fallthrough // rangeStr that is single level, e.g. s6:c0,c3,c5,c30.c1023 case 1: - mlsRange.low = &level{} - if err := mlsRange.low.parseLevel(levelSlice[0]); err != nil { - return nil, fmt.Errorf("failed to parse low level %q: %w", levelSlice[0], err) + r.low = &level{} + if err := r.low.parseLevel(l[0]); err != nil { + return nil, fmt.Errorf("failed to parse low level %q: %w", l[0], err) } } - if mlsRange.high == nil { - mlsRange.high = mlsRange.low + if r.high == nil { + r.high = r.low } - return mlsRange, nil + return r, nil } // bitsetToStr takes a category bitset and returns it in the @@ -617,17 +607,17 @@ func bitsetToStr(c *big.Int) string { return str } -func (l1 *level) equal(l2 *level) bool { - if l2 == nil || l1 == nil { - return l1 == l2 +func (l *level) equal(l2 *level) bool { + if l2 == nil || l == nil { + return l == l2 } - if l1.sens != l2.sens { + if l2.sens != l.sens { return false } - if l2.cats == nil || l1.cats == nil { - return l2.cats == l1.cats + if l2.cats == nil || l.cats == nil { + return l2.cats == l.cats } - return l1.cats.Cmp(l2.cats) == 0 + return l.cats.Cmp(l2.cats) == 0 } // String returns an mlsRange as a string. @@ -721,36 +711,13 @@ func readWriteCon(fpath string, val string) (string, error) { return readConFd(f) } -// setExecLabel sets the SELinux label that the kernel will use for any programs -// that are executed by the current process thread, or an error. -func setExecLabel(label string) error { - return writeAttr("exec", label) -} - -// setTaskLabel sets the SELinux label for the current thread, or an error. -// This requires the dyntransition permission. 
-func setTaskLabel(label string) error { - return writeAttr("current", label) -} - -// setSocketLabel takes a process label and tells the kernel to assign the -// label to the next socket that gets created -func setSocketLabel(label string) error { - return writeAttr("sockcreate", label) -} - -// socketLabel retrieves the current socket label setting -func socketLabel() (string, error) { - return readAttr("sockcreate") -} - // peerLabel retrieves the label of the client on the other side of a socket func peerLabel(fd uintptr) (string, error) { - label, err := unix.GetsockoptString(int(fd), unix.SOL_SOCKET, unix.SO_PEERSEC) + l, err := unix.GetsockoptString(int(fd), unix.SOL_SOCKET, unix.SO_PEERSEC) if err != nil { return "", &os.PathError{Op: "getsockopt", Path: "fd " + strconv.Itoa(int(fd)), Err: err} } - return label, nil + return l, nil } // setKeyLabel takes a process label and tells the kernel to assign the @@ -766,15 +733,10 @@ func setKeyLabel(label string) error { return err } -// keyLabel retrieves the current kernel keyring label setting -func keyLabel() (string, error) { - return readCon("/proc/self/attr/keycreate") -} - // get returns the Context as a string func (c Context) get() string { - if level := c["level"]; level != "" { - return c["user"] + ":" + c["role"] + ":" + c["type"] + ":" + level + if l := c["level"]; l != "" { + return c["user"] + ":" + c["role"] + ":" + c["type"] + ":" + l } return c["user"] + ":" + c["role"] + ":" + c["type"] } @@ -786,7 +748,7 @@ func newContext(label string) (Context, error) { if len(label) != 0 { con := strings.SplitN(label, ":", 4) if len(con) < 3 { - return c, InvalidLabel + return c, ErrInvalidLabel } c["user"] = con[0] c["role"] = con[1] @@ -816,14 +778,23 @@ func reserveLabel(label string) { } func selinuxEnforcePath() string { - return path.Join(getSelinuxMountPoint(), "enforce") + return filepath.Join(getSelinuxMountPoint(), "enforce") +} + +// isMLSEnabled checks if MLS is enabled. +func isMLSEnabled() bool { + enabledB, err := os.ReadFile(filepath.Join(getSelinuxMountPoint(), "mls")) + if err != nil { + return false + } + return bytes.Equal(enabledB, []byte{'1'}) } // enforceMode returns the current SELinux mode Enforcing, Permissive, Disabled func enforceMode() int { var enforce int - enforceB, err := ioutil.ReadFile(selinuxEnforcePath()) + enforceB, err := os.ReadFile(selinuxEnforcePath()) if err != nil { return -1 } @@ -837,11 +808,12 @@ func enforceMode() int { // setEnforceMode sets the current SELinux mode Enforcing, Permissive. // Disabled is not valid, since this needs to be set at boot time. func setEnforceMode(mode int) error { - return ioutil.WriteFile(selinuxEnforcePath(), []byte(strconv.Itoa(mode)), 0o644) + //nolint:gosec // ignore G306: permissions to be 0600 or less. + return os.WriteFile(selinuxEnforcePath(), []byte(strconv.Itoa(mode)), 0o644) } // defaultEnforceMode returns the systems default SELinux mode Enforcing, -// Permissive or Disabled. Note this is is just the default at boot time. +// Permissive or Disabled. Note this is just the default at boot time. // EnforceMode tells you the systems current mode. 
func defaultEnforceMode() int { switch readConfig(selinuxTag) { @@ -941,7 +913,7 @@ func openContextFile() (*os.File, error) { if f, err := os.Open(contextFile); err == nil { return f, nil } - return os.Open(filepath.Join(policyRoot(), "/contexts/lxc_contexts")) + return os.Open(filepath.Join(policyRoot(), "contexts", "lxc_contexts")) } func loadLabels() { @@ -1044,7 +1016,8 @@ func addMcs(processLabel, fileLabel string) (string, string) { // securityCheckContext validates that the SELinux label is understood by the kernel func securityCheckContext(val string) error { - return ioutil.WriteFile(path.Join(getSelinuxMountPoint(), "context"), []byte(val), 0o644) + //nolint:gosec // ignore G306: permissions to be 0600 or less. + return os.WriteFile(filepath.Join(getSelinuxMountPoint(), "context"), []byte(val), 0o644) } // copyLevel returns a label with the MLS/MCS level from src label replaced on @@ -1073,7 +1046,7 @@ func copyLevel(src, dest string) (string, error) { return tcon.Get(), nil } -// chcon changes the fpath file object to the SELinux label label. +// chcon changes the fpath file object to the SELinux label. // If fpath is a directory and recurse is true, then chcon walks the // directory tree setting the label. func chcon(fpath string, label string, recurse bool) error { @@ -1084,7 +1057,7 @@ func chcon(fpath string, label string, recurse bool) error { return nil } - exclude_paths := map[string]bool{ + excludePaths := map[string]bool{ "/": true, "/bin": true, "/boot": true, @@ -1112,19 +1085,19 @@ func chcon(fpath string, label string, recurse bool) error { } if home := os.Getenv("HOME"); home != "" { - exclude_paths[home] = true + excludePaths[home] = true } if sudoUser := os.Getenv("SUDO_USER"); sudoUser != "" { if usr, err := user.Lookup(sudoUser); err == nil { - exclude_paths[usr.HomeDir] = true + excludePaths[usr.HomeDir] = true } } if fpath != "/" { fpath = strings.TrimSuffix(fpath, "/") } - if exclude_paths[fpath] { + if excludePaths[fpath] { return fmt.Errorf("SELinux relabeling of %s is not allowed", fpath) } @@ -1152,6 +1125,28 @@ func chcon(fpath string, label string, recurse bool) error { return rchcon(fpath, label) } +func rchcon(fpath, label string) error { //revive:disable:cognitive-complexity + fastMode := false + // If the current label matches the new label, assume + // other labels are correct. + if cLabel, err := lFileLabel(fpath); err == nil && cLabel == label { + fastMode = true + } + return pwalkdir.Walk(fpath, func(p string, _ fs.DirEntry, _ error) error { + if fastMode { + if cLabel, err := lFileLabel(fpath); err == nil && cLabel == label { + return nil + } + } + err := lSetFileLabel(p, label) + // Walk a file tree can race with removal, so ignore ENOENT. + if errors.Is(err, os.ErrNotExist) { + return nil + } + return err + }) +} + // dupSecOpt takes an SELinux process label and returns security options that // can be used to set the SELinux Type and Level for future container processes. func dupSecOpt(src string) ([]string, error) { @@ -1180,12 +1175,6 @@ func dupSecOpt(src string) ([]string, error) { return dup, nil } -// disableSecOpt returns a security opt that can be used to disable SELinux -// labeling support for future container processes. -func disableSecOpt() []string { - return []string{"disable"} -} - // findUserInContext scans the reader for a valid SELinux context // match that is verified with the verifier. Invalid contexts are // skipped. 
It returns a matched context or an empty string if no diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go index 20d8880312189..bc3fd3b3701f4 100644 --- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go +++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go @@ -3,9 +3,20 @@ package selinux -func setDisabled() { +func attrPath(string) string { + return "" +} + +func readCon(fpath string) (string, error) { + return "", nil +} + +func writeCon(string, string) error { + return nil } +func setDisabled() {} + func getEnabled() bool { return false } @@ -62,22 +73,6 @@ func calculateGlbLub(sourceRange, targetRange string) (string, error) { return "", nil } -func setExecLabel(label string) error { - return nil -} - -func setTaskLabel(label string) error { - return nil -} - -func setSocketLabel(label string) error { - return nil -} - -func socketLabel() (string, error) { - return "", nil -} - func peerLabel(fd uintptr) (string, error) { return "", nil } @@ -86,17 +81,12 @@ func setKeyLabel(label string) error { return nil } -func keyLabel() (string, error) { - return "", nil -} - func (c Context) get() string { return "" } func newContext(label string) (Context, error) { - c := make(Context) - return c, nil + return Context{}, nil } func clearLabels() { @@ -105,6 +95,10 @@ func clearLabels() { func reserveLabel(label string) { } +func isMLSEnabled() bool { + return false +} + func enforceMode() int { return Disabled } @@ -152,10 +146,6 @@ func dupSecOpt(src string) ([]string, error) { return nil, nil } -func disableSecOpt() []string { - return []string{"disable"} -} - func getDefaultContextWithLevel(user, level, scon string) (string, error) { return "", nil } diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md b/vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md deleted file mode 100644 index 7e78dce0156b2..0000000000000 --- a/vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md +++ /dev/null @@ -1,48 +0,0 @@ -## pwalk: parallel implementation of filepath.Walk - -This is a wrapper for [filepath.Walk](https://pkg.go.dev/path/filepath?tab=doc#Walk) -which may speed it up by calling multiple callback functions (WalkFunc) in parallel, -utilizing goroutines. - -By default, it utilizes 2\*runtime.NumCPU() goroutines for callbacks. -This can be changed by using WalkN function which has the additional -parameter, specifying the number of goroutines (concurrency). - -### pwalk vs pwalkdir - -This package is deprecated in favor of -[pwalkdir](https://pkg.go.dev/github.com/opencontainers/selinux/pkg/pwalkdir), -which is faster, but requires at least Go 1.16. - -### Caveats - -Please note the following limitations of this code: - -* Unlike filepath.Walk, the order of calls is non-deterministic; - -* Only primitive error handling is supported: - - * filepath.SkipDir is not supported; - - * no errors are ever passed to WalkFunc; - - * once any error is returned from any WalkFunc instance, no more new calls - to WalkFunc are made, and the error is returned to the caller of Walk; - - * if more than one walkFunc instance will return an error, only one - of such errors will be propagated and returned by Walk, others - will be silently discarded. 
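// Illustrative sketch (hypothetical usage, not part of the vendored code): the
// deleted pwalk package is superseded by pwalkdir, whose callback receives an
// fs.DirEntry instead of an os.FileInfo (as used by the new rchcon above).
// Callbacks may run concurrently, so they must be safe for parallel use.

package main

import (
	"fmt"
	"io/fs"
	"log"

	"github.com/opencontainers/selinux/pkg/pwalkdir"
)

func main() {
	err := pwalkdir.Walk("/tmp", func(path string, d fs.DirEntry, _ error) error {
		fmt.Println(path, d.IsDir())
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}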
- -### Documentation - -For the official documentation, see -https://pkg.go.dev/github.com/opencontainers/selinux/pkg/pwalk?tab=doc - -### Benchmarks - -For a WalkFunc that consists solely of the return statement, this -implementation is about 10% slower than the standard library's -filepath.Walk. - -Otherwise (if a WalkFunc is doing something) this is usually faster, -except when the WalkN(..., 1) is used. diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go b/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go deleted file mode 100644 index 202c80da59c3b..0000000000000 --- a/vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go +++ /dev/null @@ -1,115 +0,0 @@ -package pwalk - -import ( - "fmt" - "os" - "path/filepath" - "runtime" - "sync" -) - -type WalkFunc = filepath.WalkFunc - -// Walk is a wrapper for filepath.Walk which can call multiple walkFn -// in parallel, allowing to handle each item concurrently. A maximum of -// twice the runtime.NumCPU() walkFn will be called at any one time. -// If you want to change the maximum, use WalkN instead. -// -// The order of calls is non-deterministic. -// -// Note that this implementation only supports primitive error handling: -// -// - no errors are ever passed to walkFn; -// -// - once a walkFn returns any error, all further processing stops -// and the error is returned to the caller of Walk; -// -// - filepath.SkipDir is not supported; -// -// - if more than one walkFn instance will return an error, only one -// of such errors will be propagated and returned by Walk, others -// will be silently discarded. -func Walk(root string, walkFn WalkFunc) error { - return WalkN(root, walkFn, runtime.NumCPU()*2) -} - -// WalkN is a wrapper for filepath.Walk which can call multiple walkFn -// in parallel, allowing to handle each item concurrently. A maximum of -// num walkFn will be called at any one time. -// -// Please see Walk documentation for caveats of using this function. -func WalkN(root string, walkFn WalkFunc, num int) error { - // make sure limit is sensible - if num < 1 { - return fmt.Errorf("walk(%q): num must be > 0", root) - } - - files := make(chan *walkArgs, 2*num) - errCh := make(chan error, 1) // get the first error, ignore others - - // Start walking a tree asap - var ( - err error - wg sync.WaitGroup - - rootLen = len(root) - rootEntry *walkArgs - ) - wg.Add(1) - go func() { - err = filepath.Walk(root, func(p string, info os.FileInfo, err error) error { - if err != nil { - close(files) - return err - } - if len(p) == rootLen { - // Root entry is processed separately below. - rootEntry = &walkArgs{path: p, info: &info} - return nil - } - // add a file to the queue unless a callback sent an error - select { - case e := <-errCh: - close(files) - return e - default: - files <- &walkArgs{path: p, info: &info} - return nil - } - }) - if err == nil { - close(files) - } - wg.Done() - }() - - wg.Add(num) - for i := 0; i < num; i++ { - go func() { - for file := range files { - if e := walkFn(file.path, *file.info, nil); e != nil { - select { - case errCh <- e: // sent ok - default: // buffer full - } - } - } - wg.Done() - }() - } - - wg.Wait() - - if err == nil { - err = walkFn(rootEntry.path, *rootEntry.info, nil) - } - - return err -} - -// walkArgs holds the arguments that were passed to the Walk or WalkN -// functions. 
-type walkArgs struct { - path string - info *os.FileInfo -} diff --git a/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go b/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go index a5796b2c4f1c5..0f5d9f580d1d3 100644 --- a/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go +++ b/vendor/github.com/opencontainers/selinux/pkg/pwalkdir/pwalkdir.go @@ -111,6 +111,6 @@ func WalkN(root string, walkFn fs.WalkDirFunc, num int) error { // walkArgs holds the arguments that were passed to the Walk or WalkN // functions. type walkArgs struct { - path string entry fs.DirEntry + path string } diff --git a/vendor/go.opencensus.io/Makefile b/vendor/go.opencensus.io/Makefile index b3ce3df3032df..d896edc996813 100644 --- a/vendor/go.opencensus.io/Makefile +++ b/vendor/go.opencensus.io/Makefile @@ -91,7 +91,7 @@ embedmd: .PHONY: install-tools install-tools: - go get -u golang.org/x/lint/golint - go get -u golang.org/x/tools/cmd/cover - go get -u golang.org/x/tools/cmd/goimports - go get -u github.com/rakyll/embedmd + go install golang.org/x/lint/golint@latest + go install golang.org/x/tools/cmd/cover@latest + go install golang.org/x/tools/cmd/goimports@latest + go install github.com/rakyll/embedmd@latest diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go index e5e4b4368c1a3..11e31f421c5d8 100644 --- a/vendor/go.opencensus.io/opencensus.go +++ b/vendor/go.opencensus.io/opencensus.go @@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io" // Version is the current release version of OpenCensus in use. func Version() string { - return "0.23.0" + return "0.24.0" } diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go index 49fde3d8c8262..fb3c19d6b646d 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go @@ -28,6 +28,7 @@ var ( ClientReceivedMessagesPerRPC = stats.Int64("grpc.io/client/received_messages_per_rpc", "Number of response messages received per RPC (always 1 for non-streaming RPCs).", stats.UnitDimensionless) ClientReceivedBytesPerRPC = stats.Int64("grpc.io/client/received_bytes_per_rpc", "Total bytes received across all response messages per RPC.", stats.UnitBytes) ClientRoundtripLatency = stats.Float64("grpc.io/client/roundtrip_latency", "Time between first byte of request sent to last byte of response received, or terminal error.", stats.UnitMilliseconds) + ClientStartedRPCs = stats.Int64("grpc.io/client/started_rpcs", "Number of started client RPCs.", stats.UnitDimensionless) ClientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds) ) @@ -70,6 +71,14 @@ var ( Aggregation: view.Count(), } + ClientStartedRPCsView = &view.View{ + Measure: ClientStartedRPCs, + Name: "grpc.io/client/started_rpcs", + Description: "Number of started client RPCs.", + TagKeys: []tag.Key{KeyClientMethod}, + Aggregation: view.Count(), + } + ClientSentMessagesPerRPCView = &view.View{ Measure: ClientSentMessagesPerRPC, Name: "grpc.io/client/sent_messages_per_rpc", diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go index b2059824a85fb..fe0e971086e6a 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go @@ -27,6 
+27,7 @@ var ( ServerReceivedBytesPerRPC = stats.Int64("grpc.io/server/received_bytes_per_rpc", "Total bytes received across all messages per RPC.", stats.UnitBytes) ServerSentMessagesPerRPC = stats.Int64("grpc.io/server/sent_messages_per_rpc", "Number of messages sent in each RPC. Has value 1 for non-streaming RPCs.", stats.UnitDimensionless) ServerSentBytesPerRPC = stats.Int64("grpc.io/server/sent_bytes_per_rpc", "Total bytes sent in across all response messages per RPC.", stats.UnitBytes) + ServerStartedRPCs = stats.Int64("grpc.io/server/started_rpcs", "Number of started server RPCs.", stats.UnitDimensionless) ServerLatency = stats.Float64("grpc.io/server/server_latency", "Time between first byte of request received to last byte of response sent, or terminal error.", stats.UnitMilliseconds) ) @@ -73,6 +74,14 @@ var ( Aggregation: view.Count(), } + ServerStartedRPCsView = &view.View{ + Measure: ServerStartedRPCs, + Name: "grpc.io/server/started_rpcs", + Description: "Number of started server RPCs.", + TagKeys: []tag.Key{KeyServerMethod}, + Aggregation: view.Count(), + } + ServerReceivedMessagesPerRPCView = &view.View{ Name: "grpc.io/server/received_messages_per_rpc", Description: "Distribution of messages received count per RPC, by method.", diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go index 89cac9c4ec0bb..9cb27320ca132 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/stats_common.go @@ -82,8 +82,10 @@ func methodName(fullname string) string { // statsHandleRPC processes the RPC events. func statsHandleRPC(ctx context.Context, s stats.RPCStats) { switch st := s.(type) { - case *stats.Begin, *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer: + case *stats.OutHeader, *stats.InHeader, *stats.InTrailer, *stats.OutTrailer: // do nothing for client + case *stats.Begin: + handleRPCBegin(ctx, st) case *stats.OutPayload: handleRPCOutPayload(ctx, st) case *stats.InPayload: @@ -95,6 +97,25 @@ func statsHandleRPC(ctx context.Context, s stats.RPCStats) { } } +func handleRPCBegin(ctx context.Context, s *stats.Begin) { + d, ok := ctx.Value(rpcDataKey).(*rpcData) + if !ok { + if grpclog.V(2) { + grpclog.Infoln("Failed to retrieve *rpcData from context.") + } + } + + if s.IsClient() { + ocstats.RecordWithOptions(ctx, + ocstats.WithTags(tag.Upsert(KeyClientMethod, methodName(d.method))), + ocstats.WithMeasurements(ClientStartedRPCs.M(1))) + } else { + ocstats.RecordWithOptions(ctx, + ocstats.WithTags(tag.Upsert(KeyClientMethod, methodName(d.method))), + ocstats.WithMeasurements(ServerStartedRPCs.M(1))) + } +} + func handleRPCOutPayload(ctx context.Context, s *stats.OutPayload) { d, ok := ctx.Value(rpcDataKey).(*rpcData) if !ok { diff --git a/vendor/go.opencensus.io/plugin/ochttp/client.go b/vendor/go.opencensus.io/plugin/ochttp/client.go new file mode 100644 index 0000000000000..da815b2a73451 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/client.go @@ -0,0 +1,117 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
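Editor's note: the new grpc.io/{client,server}/started_rpcs measures and views added above only produce data once their views are registered. A sketch (not part of the patch) of opting in on a gRPC client; the target address is hypothetical and the dial options are the standard grpc-go ones:

package main

import (
	"log"

	"go.opencensus.io/plugin/ocgrpc"
	"go.opencensus.io/stats/view"
	"google.golang.org/grpc"
)

func main() {
	// Opt in to the new started-RPC counts; views are never registered implicitly.
	if err := view.Register(ocgrpc.ClientStartedRPCsView, ocgrpc.ServerStartedRPCsView); err != nil {
		log.Fatal(err)
	}
	// The ClientHandler feeds stats.Begin events into handleRPCBegin above.
	conn, err := grpc.Dial("example.internal:50051", // hypothetical plaintext target
		grpc.WithInsecure(),
		grpc.WithStatsHandler(&ocgrpc.ClientHandler{}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}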
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "net/http" + "net/http/httptrace" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +// Transport is an http.RoundTripper that instruments all outgoing requests with +// OpenCensus stats and tracing. +// +// The zero value is intended to be a useful default, but for +// now it's recommended that you explicitly set Propagation, since the default +// for this may change. +type Transport struct { + // Base may be set to wrap another http.RoundTripper that does the actual + // requests. By default http.DefaultTransport is used. + // + // If base HTTP roundtripper implements CancelRequest, + // the returned round tripper will be cancelable. + Base http.RoundTripper + + // Propagation defines how traces are propagated. If unspecified, a default + // (currently B3 format) will be used. + Propagation propagation.HTTPFormat + + // StartOptions are applied to the span started by this Transport around each + // request. + // + // StartOptions.SpanKind will always be set to trace.SpanKindClient + // for spans started by this transport. + StartOptions trace.StartOptions + + // GetStartOptions allows to set start options per request. If set, + // StartOptions is going to be ignored. + GetStartOptions func(*http.Request) trace.StartOptions + + // NameFromRequest holds the function to use for generating the span name + // from the information found in the outgoing HTTP Request. By default the + // name equals the URL Path. + FormatSpanName func(*http.Request) string + + // NewClientTrace may be set to a function allowing the current *trace.Span + // to be annotated with HTTP request event information emitted by the + // httptrace package. + NewClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace + + // TODO: Implement tag propagation for HTTP. +} + +// RoundTrip implements http.RoundTripper, delegating to Base and recording stats and traces for the request. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.base() + if isHealthEndpoint(req.URL.Path) { + return rt.RoundTrip(req) + } + // TODO: remove excessive nesting of http.RoundTrippers here. + format := t.Propagation + if format == nil { + format = defaultFormat + } + spanNameFormatter := t.FormatSpanName + if spanNameFormatter == nil { + spanNameFormatter = spanNameFromURL + } + + startOpts := t.StartOptions + if t.GetStartOptions != nil { + startOpts = t.GetStartOptions(req) + } + + rt = &traceTransport{ + base: rt, + format: format, + startOptions: trace.StartOptions{ + Sampler: startOpts.Sampler, + SpanKind: trace.SpanKindClient, + }, + formatSpanName: spanNameFormatter, + newClientTrace: t.NewClientTrace, + } + rt = statsTransport{base: rt} + return rt.RoundTrip(req) +} + +func (t *Transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +// CancelRequest cancels an in-flight request by closing its connection. 
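Editor's note: a short sketch of how the Transport above is typically dropped into an http.Client (not part of the patch). The URL is hypothetical; Propagation is set explicitly because the type's own docs warn the default may change:

package main

import (
	"io"
	"log"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/plugin/ochttp/propagation/b3"
)

func main() {
	client := &http.Client{
		Transport: &ochttp.Transport{
			Base:        http.DefaultTransport,
			Propagation: &b3.HTTPFormat{}, // explicit, matching the current default
		},
	}
	// Hypothetical URL; requests to /healthz or /_ah/health bypass tracing.
	resp, err := client.Get("http://example.internal/v1/items")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	io.Copy(io.Discard, resp.Body) // draining to EOF ends the client span
}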
+func (t *Transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + cr.CancelRequest(req) + } +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go new file mode 100644 index 0000000000000..17142aabe002a --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go @@ -0,0 +1,143 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "context" + "io" + "net/http" + "strconv" + "sync" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/tag" +) + +// statsTransport is an http.RoundTripper that collects stats for the outgoing requests. +type statsTransport struct { + base http.RoundTripper +} + +// RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request. +func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) { + ctx, _ := tag.New(req.Context(), + tag.Upsert(KeyClientHost, req.Host), + tag.Upsert(Host, req.Host), + tag.Upsert(KeyClientPath, req.URL.Path), + tag.Upsert(Path, req.URL.Path), + tag.Upsert(KeyClientMethod, req.Method), + tag.Upsert(Method, req.Method)) + req = req.WithContext(ctx) + track := &tracker{ + start: time.Now(), + ctx: ctx, + } + if req.Body == nil { + // TODO: Handle cases where ContentLength is not set. + track.reqSize = -1 + } else if req.ContentLength > 0 { + track.reqSize = req.ContentLength + } + stats.Record(ctx, ClientRequestCount.M(1)) + + // Perform request. + resp, err := t.base.RoundTrip(req) + + if err != nil { + track.statusCode = http.StatusInternalServerError + track.end() + } else { + track.statusCode = resp.StatusCode + if req.Method != "HEAD" { + track.respContentLength = resp.ContentLength + } + if resp.Body == nil { + track.end() + } else { + track.body = resp.Body + resp.Body = wrappedBody(track, resp.Body) + } + } + return resp, err +} + +// CancelRequest cancels an in-flight request by closing its connection. 
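Editor's note: the tracker defined below defers recording until the response body hits EOF or is closed, and guards against double-recording with sync.Once. A stripped-down, illustrative-only sketch of that pattern, not taken from the vendored code:

package main

import (
	"fmt"
	"io"
	"strings"
	"sync"
)

// countingBody wraps a response body and reports the byte count exactly once,
// whether the caller drains the body to EOF or just closes it early.
type countingBody struct {
	rc   io.ReadCloser
	n    int64
	once sync.Once
}

func (b *countingBody) Read(p []byte) (int, error) {
	n, err := b.rc.Read(p)
	b.n += int64(n)
	if err == io.EOF {
		b.report()
	}
	return n, err
}

func (b *countingBody) Close() error {
	b.report() // catches early closes and error paths
	return b.rc.Close()
}

func (b *countingBody) report() {
	b.once.Do(func() { fmt.Println("response bytes:", b.n) })
}

func main() {
	body := &countingBody{rc: io.NopCloser(strings.NewReader("hello"))}
	io.Copy(io.Discard, body)
	body.Close() // reporting already happened at EOF; this is a no-op for stats
}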
+func (t statsTransport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base.(canceler); ok { + cr.CancelRequest(req) + } +} + +type tracker struct { + ctx context.Context + respSize int64 + respContentLength int64 + reqSize int64 + start time.Time + body io.ReadCloser + statusCode int + endOnce sync.Once +} + +var _ io.ReadCloser = (*tracker)(nil) + +func (t *tracker) end() { + t.endOnce.Do(func() { + latencyMs := float64(time.Since(t.start)) / float64(time.Millisecond) + respSize := t.respSize + if t.respSize == 0 && t.respContentLength > 0 { + respSize = t.respContentLength + } + m := []stats.Measurement{ + ClientSentBytes.M(t.reqSize), + ClientReceivedBytes.M(respSize), + ClientRoundtripLatency.M(latencyMs), + ClientLatency.M(latencyMs), + ClientResponseBytes.M(t.respSize), + } + if t.reqSize >= 0 { + m = append(m, ClientRequestBytes.M(t.reqSize)) + } + + stats.RecordWithTags(t.ctx, []tag.Mutator{ + tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)), + tag.Upsert(KeyClientStatus, strconv.Itoa(t.statusCode)), + }, m...) + }) +} + +func (t *tracker) Read(b []byte) (int, error) { + n, err := t.body.Read(b) + t.respSize += int64(n) + switch err { + case nil: + return n, nil + case io.EOF: + t.end() + } + return n, err +} + +func (t *tracker) Close() error { + // Invoking endSpan on Close will help catch the cases + // in which a read returned a non-nil error, we set the + // span status but didn't end the span. + t.end() + return t.body.Close() +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/doc.go b/vendor/go.opencensus.io/plugin/ochttp/doc.go new file mode 100644 index 0000000000000..10e626b16e610 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/doc.go @@ -0,0 +1,19 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ochttp provides OpenCensus instrumentation for net/http package. +// +// For server instrumentation, see Handler. For client-side instrumentation, +// see Transport. +package ochttp // import "go.opencensus.io/plugin/ochttp" diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go new file mode 100644 index 0000000000000..9ad8852198d5b --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go @@ -0,0 +1,123 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package b3 contains a propagation.HTTPFormat implementation +// for B3 propagation. See https://github.com/openzipkin/b3-propagation +// for more details. +package b3 // import "go.opencensus.io/plugin/ochttp/propagation/b3" + +import ( + "encoding/hex" + "net/http" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +// B3 headers that OpenCensus understands. +const ( + TraceIDHeader = "X-B3-TraceId" + SpanIDHeader = "X-B3-SpanId" + SampledHeader = "X-B3-Sampled" +) + +// HTTPFormat implements propagation.HTTPFormat to propagate +// traces in HTTP headers in B3 propagation format. +// HTTPFormat skips the X-B3-ParentId and X-B3-Flags headers +// because there are additional fields not represented in the +// OpenCensus span context. Spans created from the incoming +// header will be the direct children of the client-side span. +// Similarly, receiver of the outgoing spans should use client-side +// span created by OpenCensus as the parent. +type HTTPFormat struct{} + +var _ propagation.HTTPFormat = (*HTTPFormat)(nil) + +// SpanContextFromRequest extracts a B3 span context from incoming requests. +func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { + tid, ok := ParseTraceID(req.Header.Get(TraceIDHeader)) + if !ok { + return trace.SpanContext{}, false + } + sid, ok := ParseSpanID(req.Header.Get(SpanIDHeader)) + if !ok { + return trace.SpanContext{}, false + } + sampled, _ := ParseSampled(req.Header.Get(SampledHeader)) + return trace.SpanContext{ + TraceID: tid, + SpanID: sid, + TraceOptions: sampled, + }, true +} + +// ParseTraceID parses the value of the X-B3-TraceId header. +func ParseTraceID(tid string) (trace.TraceID, bool) { + if tid == "" { + return trace.TraceID{}, false + } + b, err := hex.DecodeString(tid) + if err != nil || len(b) > 16 { + return trace.TraceID{}, false + } + var traceID trace.TraceID + if len(b) <= 8 { + // The lower 64-bits. + start := 8 + (8 - len(b)) + copy(traceID[start:], b) + } else { + start := 16 - len(b) + copy(traceID[start:], b) + } + + return traceID, true +} + +// ParseSpanID parses the value of the X-B3-SpanId or X-B3-ParentSpanId headers. +func ParseSpanID(sid string) (spanID trace.SpanID, ok bool) { + if sid == "" { + return trace.SpanID{}, false + } + b, err := hex.DecodeString(sid) + if err != nil || len(b) > 8 { + return trace.SpanID{}, false + } + start := 8 - len(b) + copy(spanID[start:], b) + return spanID, true +} + +// ParseSampled parses the value of the X-B3-Sampled header. +func ParseSampled(sampled string) (trace.TraceOptions, bool) { + switch sampled { + case "true", "1": + return trace.TraceOptions(1), true + default: + return trace.TraceOptions(0), false + } +} + +// SpanContextToRequest modifies the given request to include B3 headers. 
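Editor's note: a sketch showing both halves of the B3 format together, extracting a remote parent from the X-B3-* headers parsed above and injecting a context via SpanContextToRequest (shown next). The downstream URL and names are hypothetical:

package b3demo

import (
	"net/http"

	"go.opencensus.io/plugin/ochttp/propagation/b3"
	"go.opencensus.io/trace"
)

var format b3.HTTPFormat

// handle continues an incoming B3 trace and propagates it to a downstream call.
func handle(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	if sc, ok := format.SpanContextFromRequest(r); ok {
		// X-B3-TraceId and X-B3-SpanId were present and parsed successfully.
		var span *trace.Span
		ctx, span = trace.StartSpanWithRemoteParent(ctx, "handle", sc)
		defer span.End()
	}

	ctx, span := trace.StartSpan(ctx, "call-backend")
	defer span.End()
	out, _ := http.NewRequestWithContext(ctx, http.MethodGet, "http://backend.internal/ping", nil) // hypothetical
	format.SpanContextToRequest(span.SpanContext(), out)
	// out now carries X-B3-TraceId, X-B3-SpanId and X-B3-Sampled and can be
	// sent with any HTTP client.
}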
+func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { + req.Header.Set(TraceIDHeader, hex.EncodeToString(sc.TraceID[:])) + req.Header.Set(SpanIDHeader, hex.EncodeToString(sc.SpanID[:])) + + var sampled string + if sc.IsSampled() { + sampled = "1" + } else { + sampled = "0" + } + req.Header.Set(SampledHeader, sampled) +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/route.go b/vendor/go.opencensus.io/plugin/ochttp/route.go new file mode 100644 index 0000000000000..5e6a3430760b6 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/route.go @@ -0,0 +1,61 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "context" + "net/http" + + "go.opencensus.io/tag" +) + +// SetRoute sets the http_server_route tag to the given value. +// It's useful when an HTTP framework does not support the http.Handler interface +// and using WithRouteTag is not an option, but provides a way to hook into the request flow. +func SetRoute(ctx context.Context, route string) { + if a, ok := ctx.Value(addedTagsKey{}).(*addedTags); ok { + a.t = append(a.t, tag.Upsert(KeyServerRoute, route)) + } +} + +// WithRouteTag returns an http.Handler that records stats with the +// http_server_route tag set to the given value. +func WithRouteTag(handler http.Handler, route string) http.Handler { + return taggedHandlerFunc(func(w http.ResponseWriter, r *http.Request) []tag.Mutator { + addRoute := []tag.Mutator{tag.Upsert(KeyServerRoute, route)} + ctx, _ := tag.New(r.Context(), addRoute...) + r = r.WithContext(ctx) + handler.ServeHTTP(w, r) + return addRoute + }) +} + +// taggedHandlerFunc is a http.Handler that returns tags describing the +// processing of the request. These tags will be recorded along with the +// measures in this package at the end of the request. +type taggedHandlerFunc func(w http.ResponseWriter, r *http.Request) []tag.Mutator + +func (h taggedHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) { + tags := h(w, r) + if a, ok := r.Context().Value(addedTagsKey{}).(*addedTags); ok { + a.t = append(a.t, tags...) + } +} + +type addedTagsKey struct{} + +type addedTags struct { + t []tag.Mutator +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go new file mode 100644 index 0000000000000..f7c8434be06cb --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/server.go @@ -0,0 +1,455 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
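Editor's note: a sketch of tagging server metrics with a low-cardinality route via WithRouteTag from the route.go hunk above (SetRoute is the escape hatch for frameworks that bypass http.Handler). The mux patterns and handler names are illustrative:

package routedemo

import (
	"net/http"

	"go.opencensus.io/plugin/ochttp"
)

func newMux(users, orders http.Handler) http.Handler {
	mux := http.NewServeMux()
	// Each handler's stats carry http_server_route set to the registered
	// pattern rather than the raw, high-cardinality URL path.
	mux.Handle("/users/", ochttp.WithRouteTag(users, "/users/:id"))
	mux.Handle("/orders/", ochttp.WithRouteTag(orders, "/orders/:id"))
	// The route tag is only recorded when the mux is wrapped in ochttp.Handler,
	// which collects the added tags at the end of the request.
	return &ochttp.Handler{Handler: mux}
}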
+// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "context" + "io" + "net/http" + "strconv" + "sync" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/tag" + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +// Handler is an http.Handler wrapper to instrument your HTTP server with +// OpenCensus. It supports both stats and tracing. +// +// # Tracing +// +// This handler is aware of the incoming request's span, reading it from request +// headers as configured using the Propagation field. +// The extracted span can be accessed from the incoming request's +// context. +// +// span := trace.FromContext(r.Context()) +// +// The server span will be automatically ended at the end of ServeHTTP. +type Handler struct { + // Propagation defines how traces are propagated. If unspecified, + // B3 propagation will be used. + Propagation propagation.HTTPFormat + + // Handler is the handler used to handle the incoming request. + Handler http.Handler + + // StartOptions are applied to the span started by this Handler around each + // request. + // + // StartOptions.SpanKind will always be set to trace.SpanKindServer + // for spans started by this transport. + StartOptions trace.StartOptions + + // GetStartOptions allows to set start options per request. If set, + // StartOptions is going to be ignored. + GetStartOptions func(*http.Request) trace.StartOptions + + // IsPublicEndpoint should be set to true for publicly accessible HTTP(S) + // servers. If true, any trace metadata set on the incoming request will + // be added as a linked trace instead of being added as a parent of the + // current trace. + IsPublicEndpoint bool + + // FormatSpanName holds the function to use for generating the span name + // from the information found in the incoming HTTP Request. By default the + // name equals the URL Path. + FormatSpanName func(*http.Request) string + + // IsHealthEndpoint holds the function to use for determining if the + // incoming HTTP request should be considered a health check. This is in + // addition to the private isHealthEndpoint func which may also indicate + // tracing should be skipped. 
+ IsHealthEndpoint func(*http.Request) bool +} + +func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + var tags addedTags + r, traceEnd := h.startTrace(w, r) + defer traceEnd() + w, statsEnd := h.startStats(w, r) + defer statsEnd(&tags) + handler := h.Handler + if handler == nil { + handler = http.DefaultServeMux + } + r = r.WithContext(context.WithValue(r.Context(), addedTagsKey{}, &tags)) + handler.ServeHTTP(w, r) +} + +func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) { + if h.IsHealthEndpoint != nil && h.IsHealthEndpoint(r) || isHealthEndpoint(r.URL.Path) { + return r, func() {} + } + var name string + if h.FormatSpanName == nil { + name = spanNameFromURL(r) + } else { + name = h.FormatSpanName(r) + } + ctx := r.Context() + + startOpts := h.StartOptions + if h.GetStartOptions != nil { + startOpts = h.GetStartOptions(r) + } + + var span *trace.Span + sc, ok := h.extractSpanContext(r) + if ok && !h.IsPublicEndpoint { + ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc, + trace.WithSampler(startOpts.Sampler), + trace.WithSpanKind(trace.SpanKindServer)) + } else { + ctx, span = trace.StartSpan(ctx, name, + trace.WithSampler(startOpts.Sampler), + trace.WithSpanKind(trace.SpanKindServer), + ) + if ok { + span.AddLink(trace.Link{ + TraceID: sc.TraceID, + SpanID: sc.SpanID, + Type: trace.LinkTypeParent, + Attributes: nil, + }) + } + } + span.AddAttributes(requestAttrs(r)...) + if r.Body == nil { + // TODO: Handle cases where ContentLength is not set. + } else if r.ContentLength > 0 { + span.AddMessageReceiveEvent(0, /* TODO: messageID */ + r.ContentLength, -1) + } + return r.WithContext(ctx), span.End +} + +func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) { + if h.Propagation == nil { + return defaultFormat.SpanContextFromRequest(r) + } + return h.Propagation.SpanContextFromRequest(r) +} + +func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func(tags *addedTags)) { + ctx, _ := tag.New(r.Context(), + tag.Upsert(Host, r.Host), + tag.Upsert(Path, r.URL.Path), + tag.Upsert(Method, r.Method)) + track := &trackingResponseWriter{ + start: time.Now(), + ctx: ctx, + writer: w, + } + if r.Body == nil { + // TODO: Handle cases where ContentLength is not set. 
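Editor's note: a minimal server wiring sketch for the Handler and ServeHTTP flow shown above (address and handler body are hypothetical). Incoming trace headers become the remote parent unless IsPublicEndpoint is set, in which case they are attached as a link:

package main

import (
	"log"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/trace"
)

func main() {
	api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// The server span started in startTrace is available on the request context.
		span := trace.FromContext(r.Context())
		span.Annotate(nil, "handling request")
		w.Write([]byte("ok"))
	})

	och := &ochttp.Handler{
		Handler:          api,
		IsPublicEndpoint: true, // treat incoming trace headers as a link, not a parent
	}
	log.Fatal(http.ListenAndServe(":8080", och))
}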
+ track.reqSize = -1 + } else if r.ContentLength > 0 { + track.reqSize = r.ContentLength + } + stats.Record(ctx, ServerRequestCount.M(1)) + return track.wrappedResponseWriter(), track.end +} + +type trackingResponseWriter struct { + ctx context.Context + reqSize int64 + respSize int64 + start time.Time + statusCode int + statusLine string + endOnce sync.Once + writer http.ResponseWriter +} + +// Compile time assertion for ResponseWriter interface +var _ http.ResponseWriter = (*trackingResponseWriter)(nil) + +func (t *trackingResponseWriter) end(tags *addedTags) { + t.endOnce.Do(func() { + if t.statusCode == 0 { + t.statusCode = 200 + } + + span := trace.FromContext(t.ctx) + span.SetStatus(TraceStatus(t.statusCode, t.statusLine)) + span.AddAttributes(trace.Int64Attribute(StatusCodeAttribute, int64(t.statusCode))) + + m := []stats.Measurement{ + ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)), + ServerResponseBytes.M(t.respSize), + } + if t.reqSize >= 0 { + m = append(m, ServerRequestBytes.M(t.reqSize)) + } + allTags := make([]tag.Mutator, len(tags.t)+1) + allTags[0] = tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)) + copy(allTags[1:], tags.t) + stats.RecordWithTags(t.ctx, allTags, m...) + }) +} + +func (t *trackingResponseWriter) Header() http.Header { + return t.writer.Header() +} + +func (t *trackingResponseWriter) Write(data []byte) (int, error) { + n, err := t.writer.Write(data) + t.respSize += int64(n) + // Add message event for request bytes sent. + span := trace.FromContext(t.ctx) + span.AddMessageSendEvent(0 /* TODO: messageID */, int64(n), -1) + return n, err +} + +func (t *trackingResponseWriter) WriteHeader(statusCode int) { + t.writer.WriteHeader(statusCode) + t.statusCode = statusCode + t.statusLine = http.StatusText(t.statusCode) +} + +// wrappedResponseWriter returns a wrapped version of the original +// +// ResponseWriter and only implements the same combination of additional +// +// interfaces as the original. +// This implementation is based on https://github.com/felixge/httpsnoop. 
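Editor's note: the combinatorial switch that follows exists so that wrapping the ResponseWriter does not hide its optional interfaces. A brief, illustrative-only sketch of why that matters for a handler running behind ochttp.Handler:

package flushdemo

import (
	"fmt"
	"net/http"
	"time"
)

// stream sends periodic updates; it keeps working behind ochttp.Handler
// because the wrapped writer still satisfies http.Flusher whenever the
// original writer does.
func stream(w http.ResponseWriter, r *http.Request) {
	fl, ok := w.(http.Flusher)
	if !ok {
		http.Error(w, "streaming unsupported", http.StatusInternalServerError)
		return
	}
	for i := 0; i < 3; i++ {
		fmt.Fprintf(w, "tick %d\n", i)
		fl.Flush()
		time.Sleep(100 * time.Millisecond)
	}
}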
+func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter { + var ( + hj, i0 = t.writer.(http.Hijacker) + cn, i1 = t.writer.(http.CloseNotifier) + pu, i2 = t.writer.(http.Pusher) + fl, i3 = t.writer.(http.Flusher) + rf, i4 = t.writer.(io.ReaderFrom) + ) + + switch { + case !i0 && !i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + }{t} + case !i0 && !i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + io.ReaderFrom + }{t, rf} + case !i0 && !i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Flusher + }{t, fl} + case !i0 && !i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{t, fl, rf} + case !i0 && !i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Pusher + }{t, pu} + case !i0 && !i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Pusher + io.ReaderFrom + }{t, pu, rf} + case !i0 && !i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Pusher + http.Flusher + }{t, pu, fl} + case !i0 && !i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Pusher + http.Flusher + io.ReaderFrom + }{t, pu, fl, rf} + case !i0 && i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + }{t, cn} + case !i0 && i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{t, cn, rf} + case !i0 && i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Flusher + }{t, cn, fl} + case !i0 && i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Flusher + io.ReaderFrom + }{t, cn, fl, rf} + case !i0 && i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + }{t, cn, pu} + case !i0 && i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + io.ReaderFrom + }{t, cn, pu, rf} + case !i0 && i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + http.Flusher + }{t, cn, pu, fl} + case !i0 && i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + http.Flusher + io.ReaderFrom + }{t, cn, pu, fl, rf} + case i0 && !i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + }{t, hj} + case i0 && !i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{t, hj, rf} + case i0 && !i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Flusher + }{t, hj, fl} + case i0 && !i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Flusher + io.ReaderFrom + }{t, hj, fl, rf} + case i0 && !i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + }{t, hj, pu} + case i0 && !i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + io.ReaderFrom + }{t, hj, pu, rf} + case i0 && !i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + http.Flusher + }{t, hj, pu, fl} + case i0 && !i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + http.Flusher + io.ReaderFrom + }{t, hj, pu, fl, rf} + case i0 && i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + }{t, hj, cn} + case i0 && i1 && !i2 && !i3 && i4: + return struct { + 
http.ResponseWriter + http.Hijacker + http.CloseNotifier + io.ReaderFrom + }{t, hj, cn, rf} + case i0 && i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Flusher + }{t, hj, cn, fl} + case i0 && i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Flusher + io.ReaderFrom + }{t, hj, cn, fl, rf} + case i0 && i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + }{t, hj, cn, pu} + case i0 && i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + io.ReaderFrom + }{t, hj, cn, pu, rf} + case i0 && i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + http.Flusher + }{t, hj, cn, pu, fl} + case i0 && i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + http.Flusher + io.ReaderFrom + }{t, hj, cn, pu, fl, rf} + default: + return struct { + http.ResponseWriter + }{t} + } +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go b/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go new file mode 100644 index 0000000000000..05c6c56cc790f --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go @@ -0,0 +1,169 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "crypto/tls" + "net/http" + "net/http/httptrace" + "strings" + + "go.opencensus.io/trace" +) + +type spanAnnotator struct { + sp *trace.Span +} + +// TODO: Remove NewSpanAnnotator at the next release. + +// NewSpanAnnotator returns a httptrace.ClientTrace which annotates +// all emitted httptrace events on the provided Span. +// Deprecated: Use NewSpanAnnotatingClientTrace instead +func NewSpanAnnotator(r *http.Request, s *trace.Span) *httptrace.ClientTrace { + return NewSpanAnnotatingClientTrace(r, s) +} + +// NewSpanAnnotatingClientTrace returns a httptrace.ClientTrace which annotates +// all emitted httptrace events on the provided Span. 
+func NewSpanAnnotatingClientTrace(_ *http.Request, s *trace.Span) *httptrace.ClientTrace { + sa := spanAnnotator{sp: s} + + return &httptrace.ClientTrace{ + GetConn: sa.getConn, + GotConn: sa.gotConn, + PutIdleConn: sa.putIdleConn, + GotFirstResponseByte: sa.gotFirstResponseByte, + Got100Continue: sa.got100Continue, + DNSStart: sa.dnsStart, + DNSDone: sa.dnsDone, + ConnectStart: sa.connectStart, + ConnectDone: sa.connectDone, + TLSHandshakeStart: sa.tlsHandshakeStart, + TLSHandshakeDone: sa.tlsHandshakeDone, + WroteHeaders: sa.wroteHeaders, + Wait100Continue: sa.wait100Continue, + WroteRequest: sa.wroteRequest, + } +} + +func (s spanAnnotator) getConn(hostPort string) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.get_connection.host_port", hostPort), + } + s.sp.Annotate(attrs, "GetConn") +} + +func (s spanAnnotator) gotConn(info httptrace.GotConnInfo) { + attrs := []trace.Attribute{ + trace.BoolAttribute("httptrace.got_connection.reused", info.Reused), + trace.BoolAttribute("httptrace.got_connection.was_idle", info.WasIdle), + } + if info.WasIdle { + attrs = append(attrs, + trace.StringAttribute("httptrace.got_connection.idle_time", info.IdleTime.String())) + } + s.sp.Annotate(attrs, "GotConn") +} + +// PutIdleConn implements a httptrace.ClientTrace hook +func (s spanAnnotator) putIdleConn(err error) { + var attrs []trace.Attribute + if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.put_idle_connection.error", err.Error())) + } + s.sp.Annotate(attrs, "PutIdleConn") +} + +func (s spanAnnotator) gotFirstResponseByte() { + s.sp.Annotate(nil, "GotFirstResponseByte") +} + +func (s spanAnnotator) got100Continue() { + s.sp.Annotate(nil, "Got100Continue") +} + +func (s spanAnnotator) dnsStart(info httptrace.DNSStartInfo) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.dns_start.host", info.Host), + } + s.sp.Annotate(attrs, "DNSStart") +} + +func (s spanAnnotator) dnsDone(info httptrace.DNSDoneInfo) { + var addrs []string + for _, addr := range info.Addrs { + addrs = append(addrs, addr.String()) + } + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.dns_done.addrs", strings.Join(addrs, " , ")), + } + if info.Err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.dns_done.error", info.Err.Error())) + } + s.sp.Annotate(attrs, "DNSDone") +} + +func (s spanAnnotator) connectStart(network, addr string) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.connect_start.network", network), + trace.StringAttribute("httptrace.connect_start.addr", addr), + } + s.sp.Annotate(attrs, "ConnectStart") +} + +func (s spanAnnotator) connectDone(network, addr string, err error) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.connect_done.network", network), + trace.StringAttribute("httptrace.connect_done.addr", addr), + } + if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.connect_done.error", err.Error())) + } + s.sp.Annotate(attrs, "ConnectDone") +} + +func (s spanAnnotator) tlsHandshakeStart() { + s.sp.Annotate(nil, "TLSHandshakeStart") +} + +func (s spanAnnotator) tlsHandshakeDone(_ tls.ConnectionState, err error) { + var attrs []trace.Attribute + if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.tls_handshake_done.error", err.Error())) + } + s.sp.Annotate(attrs, "TLSHandshakeDone") +} + +func (s spanAnnotator) wroteHeaders() { + s.sp.Annotate(nil, "WroteHeaders") +} + +func (s spanAnnotator) wait100Continue() { + s.sp.Annotate(nil, 
"Wait100Continue") +} + +func (s spanAnnotator) wroteRequest(info httptrace.WroteRequestInfo) { + var attrs []trace.Attribute + if info.Err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.wrote_request.error", info.Err.Error())) + } + s.sp.Annotate(attrs, "WroteRequest") +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/stats.go b/vendor/go.opencensus.io/plugin/ochttp/stats.go new file mode 100644 index 0000000000000..ee3729040ddd5 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/stats.go @@ -0,0 +1,292 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +// Deprecated: client HTTP measures. +var ( + // Deprecated: Use a Count aggregation over one of the other client measures to achieve the same effect. + ClientRequestCount = stats.Int64( + "opencensus.io/http/client/request_count", + "Number of HTTP requests started", + stats.UnitDimensionless) + // Deprecated: Use ClientSentBytes. + ClientRequestBytes = stats.Int64( + "opencensus.io/http/client/request_bytes", + "HTTP request body size if set as ContentLength (uncompressed)", + stats.UnitBytes) + // Deprecated: Use ClientReceivedBytes. + ClientResponseBytes = stats.Int64( + "opencensus.io/http/client/response_bytes", + "HTTP response body size (uncompressed)", + stats.UnitBytes) + // Deprecated: Use ClientRoundtripLatency. + ClientLatency = stats.Float64( + "opencensus.io/http/client/latency", + "End-to-end latency", + stats.UnitMilliseconds) +) + +// The following client HTTP measures are supported for use in custom views. 
+var ( + ClientSentBytes = stats.Int64( + "opencensus.io/http/client/sent_bytes", + "Total bytes sent in request body (not including headers)", + stats.UnitBytes, + ) + ClientReceivedBytes = stats.Int64( + "opencensus.io/http/client/received_bytes", + "Total bytes received in response bodies (not including headers but including error responses with bodies)", + stats.UnitBytes, + ) + ClientRoundtripLatency = stats.Float64( + "opencensus.io/http/client/roundtrip_latency", + "Time between first byte of request headers sent to last byte of response received, or terminal error", + stats.UnitMilliseconds, + ) +) + +// The following server HTTP measures are supported for use in custom views: +var ( + ServerRequestCount = stats.Int64( + "opencensus.io/http/server/request_count", + "Number of HTTP requests started", + stats.UnitDimensionless) + ServerRequestBytes = stats.Int64( + "opencensus.io/http/server/request_bytes", + "HTTP request body size if set as ContentLength (uncompressed)", + stats.UnitBytes) + ServerResponseBytes = stats.Int64( + "opencensus.io/http/server/response_bytes", + "HTTP response body size (uncompressed)", + stats.UnitBytes) + ServerLatency = stats.Float64( + "opencensus.io/http/server/latency", + "End-to-end latency", + stats.UnitMilliseconds) +) + +// The following tags are applied to stats recorded by this package. Host, Path +// and Method are applied to all measures. StatusCode is not applied to +// ClientRequestCount or ServerRequestCount, since it is recorded before the status is known. +var ( + // Host is the value of the HTTP Host header. + // + // The value of this tag can be controlled by the HTTP client, so you need + // to watch out for potentially generating high-cardinality labels in your + // metrics backend if you use this tag in views. + Host = tag.MustNewKey("http.host") + + // StatusCode is the numeric HTTP response status code, + // or "error" if a transport error occurred and no status code was read. + StatusCode = tag.MustNewKey("http.status") + + // Path is the URL path (not including query string) in the request. + // + // The value of this tag can be controlled by the HTTP client, so you need + // to watch out for potentially generating high-cardinality labels in your + // metrics backend if you use this tag in views. + Path = tag.MustNewKey("http.path") + + // Method is the HTTP method of the request, capitalized (GET, POST, etc.). + Method = tag.MustNewKey("http.method") + + // KeyServerRoute is a low cardinality string representing the logical + // handler of the request. This is usually the pattern registered on the a + // ServeMux (or similar string). + KeyServerRoute = tag.MustNewKey("http_server_route") +) + +// Client tag keys. +var ( + // KeyClientMethod is the HTTP method, capitalized (i.e. GET, POST, PUT, DELETE, etc.). + KeyClientMethod = tag.MustNewKey("http_client_method") + // KeyClientPath is the URL path (not including query string). + KeyClientPath = tag.MustNewKey("http_client_path") + // KeyClientStatus is the HTTP status code as an integer (e.g. 200, 404, 500.), or "error" if no response status line was received. + KeyClientStatus = tag.MustNewKey("http_client_status") + // KeyClientHost is the value of the request Host header. + KeyClientHost = tag.MustNewKey("http_client_host") +) + +// Default distributions used by views in this package. 
+var ( + DefaultSizeDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) + DefaultLatencyDistribution = view.Distribution(1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) +) + +// Package ochttp provides some convenience views for client measures. +// You still need to register these views for data to actually be collected. +var ( + ClientSentBytesDistribution = &view.View{ + Name: "opencensus.io/http/client/sent_bytes", + Measure: ClientSentBytes, + Aggregation: DefaultSizeDistribution, + Description: "Total bytes sent in request body (not including headers), by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientReceivedBytesDistribution = &view.View{ + Name: "opencensus.io/http/client/received_bytes", + Measure: ClientReceivedBytes, + Aggregation: DefaultSizeDistribution, + Description: "Total bytes received in response bodies (not including headers but including error responses with bodies), by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientRoundtripLatencyDistribution = &view.View{ + Name: "opencensus.io/http/client/roundtrip_latency", + Measure: ClientRoundtripLatency, + Aggregation: DefaultLatencyDistribution, + Description: "End-to-end latency, by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientCompletedCount = &view.View{ + Name: "opencensus.io/http/client/completed_count", + Measure: ClientRoundtripLatency, + Aggregation: view.Count(), + Description: "Count of completed requests, by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } +) + +// Deprecated: Old client Views. +var ( + // Deprecated: No direct replacement, but see ClientCompletedCount. + ClientRequestCountView = &view.View{ + Name: "opencensus.io/http/client/request_count", + Description: "Count of HTTP requests started", + Measure: ClientRequestCount, + Aggregation: view.Count(), + } + + // Deprecated: Use ClientSentBytesDistribution. + ClientRequestBytesView = &view.View{ + Name: "opencensus.io/http/client/request_bytes", + Description: "Size distribution of HTTP request body", + Measure: ClientSentBytes, + Aggregation: DefaultSizeDistribution, + } + + // Deprecated: Use ClientReceivedBytesDistribution instead. + ClientResponseBytesView = &view.View{ + Name: "opencensus.io/http/client/response_bytes", + Description: "Size distribution of HTTP response body", + Measure: ClientReceivedBytes, + Aggregation: DefaultSizeDistribution, + } + + // Deprecated: Use ClientRoundtripLatencyDistribution instead. + ClientLatencyView = &view.View{ + Name: "opencensus.io/http/client/latency", + Description: "Latency distribution of HTTP requests", + Measure: ClientRoundtripLatency, + Aggregation: DefaultLatencyDistribution, + } + + // Deprecated: Use ClientCompletedCount instead. + ClientRequestCountByMethod = &view.View{ + Name: "opencensus.io/http/client/request_count_by_method", + Description: "Client request count by HTTP method", + TagKeys: []tag.Key{Method}, + Measure: ClientSentBytes, + Aggregation: view.Count(), + } + + // Deprecated: Use ClientCompletedCount instead. 
+ ClientResponseCountByStatusCode = &view.View{ + Name: "opencensus.io/http/client/response_count_by_status_code", + Description: "Client response count by status code", + TagKeys: []tag.Key{StatusCode}, + Measure: ClientRoundtripLatency, + Aggregation: view.Count(), + } +) + +// Package ochttp provides some convenience views for server measures. +// You still need to register these views for data to actually be collected. +var ( + ServerRequestCountView = &view.View{ + Name: "opencensus.io/http/server/request_count", + Description: "Count of HTTP requests started", + Measure: ServerRequestCount, + Aggregation: view.Count(), + } + + ServerRequestBytesView = &view.View{ + Name: "opencensus.io/http/server/request_bytes", + Description: "Size distribution of HTTP request body", + Measure: ServerRequestBytes, + Aggregation: DefaultSizeDistribution, + } + + ServerResponseBytesView = &view.View{ + Name: "opencensus.io/http/server/response_bytes", + Description: "Size distribution of HTTP response body", + Measure: ServerResponseBytes, + Aggregation: DefaultSizeDistribution, + } + + ServerLatencyView = &view.View{ + Name: "opencensus.io/http/server/latency", + Description: "Latency distribution of HTTP requests", + Measure: ServerLatency, + Aggregation: DefaultLatencyDistribution, + } + + ServerRequestCountByMethod = &view.View{ + Name: "opencensus.io/http/server/request_count_by_method", + Description: "Server request count by HTTP method", + TagKeys: []tag.Key{Method}, + Measure: ServerRequestCount, + Aggregation: view.Count(), + } + + ServerResponseCountByStatusCode = &view.View{ + Name: "opencensus.io/http/server/response_count_by_status_code", + Description: "Server response count by status code", + TagKeys: []tag.Key{StatusCode}, + Measure: ServerLatency, + Aggregation: view.Count(), + } +) + +// DefaultClientViews are the default client views provided by this package. +// Deprecated: No replacement. Register the views you would like individually. +var DefaultClientViews = []*view.View{ + ClientRequestCountView, + ClientRequestBytesView, + ClientResponseBytesView, + ClientLatencyView, + ClientRequestCountByMethod, + ClientResponseCountByStatusCode, +} + +// DefaultServerViews are the default server views provided by this package. +// Deprecated: No replacement. Register the views you would like individually. +var DefaultServerViews = []*view.View{ + ServerRequestCountView, + ServerRequestBytesView, + ServerResponseBytesView, + ServerLatencyView, + ServerRequestCountByMethod, + ServerResponseCountByStatusCode, +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace.go b/vendor/go.opencensus.io/plugin/ochttp/trace.go new file mode 100644 index 0000000000000..ed3a5db561134 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/trace.go @@ -0,0 +1,244 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
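Editor's note: as the comments in this file state, none of these views are registered automatically. A sketch of opting in to the non-deprecated client views plus a few server views; wiring an exporter is left out:

package viewsdemo

import (
	"log"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/stats/view"
)

func registerHTTPViews() {
	err := view.Register(
		// Client side: the current distributions and completion count.
		ochttp.ClientSentBytesDistribution,
		ochttp.ClientReceivedBytesDistribution,
		ochttp.ClientRoundtripLatencyDistribution,
		ochttp.ClientCompletedCount,
		// Server side: request count, response sizes and latency.
		ochttp.ServerRequestCountView,
		ochttp.ServerResponseBytesView,
		ochttp.ServerLatencyView,
	)
	if err != nil {
		log.Fatal(err)
	}
}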
+ +package ochttp + +import ( + "io" + "net/http" + "net/http/httptrace" + + "go.opencensus.io/plugin/ochttp/propagation/b3" + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +// TODO(jbd): Add godoc examples. + +var defaultFormat propagation.HTTPFormat = &b3.HTTPFormat{} + +// Attributes recorded on the span for the requests. +// Only trace exporters will need them. +const ( + HostAttribute = "http.host" + MethodAttribute = "http.method" + PathAttribute = "http.path" + URLAttribute = "http.url" + UserAgentAttribute = "http.user_agent" + StatusCodeAttribute = "http.status_code" +) + +type traceTransport struct { + base http.RoundTripper + startOptions trace.StartOptions + format propagation.HTTPFormat + formatSpanName func(*http.Request) string + newClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace +} + +// TODO(jbd): Add message events for request and response size. + +// RoundTrip creates a trace.Span and inserts it into the outgoing request's headers. +// The created span can follow a parent span, if a parent is presented in +// the request's context. +func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { + name := t.formatSpanName(req) + // TODO(jbd): Discuss whether we want to prefix + // outgoing requests with Sent. + ctx, span := trace.StartSpan(req.Context(), name, + trace.WithSampler(t.startOptions.Sampler), + trace.WithSpanKind(trace.SpanKindClient)) + + if t.newClientTrace != nil { + req = req.WithContext(httptrace.WithClientTrace(ctx, t.newClientTrace(req, span))) + } else { + req = req.WithContext(ctx) + } + + if t.format != nil { + // SpanContextToRequest will modify its Request argument, which is + // contrary to the contract for http.RoundTripper, so we need to + // pass it a copy of the Request. + // However, the Request struct itself was already copied by + // the WithContext calls above and so we just need to copy the header. + header := make(http.Header) + for k, v := range req.Header { + header[k] = v + } + req.Header = header + t.format.SpanContextToRequest(span.SpanContext(), req) + } + + span.AddAttributes(requestAttrs(req)...) + resp, err := t.base.RoundTrip(req) + if err != nil { + span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}) + span.End() + return resp, err + } + + span.AddAttributes(responseAttrs(resp)...) + span.SetStatus(TraceStatus(resp.StatusCode, resp.Status)) + + // span.End() will be invoked after + // a read from resp.Body returns io.EOF or when + // resp.Body.Close() is invoked. + bt := &bodyTracker{rc: resp.Body, span: span} + resp.Body = wrappedBody(bt, resp.Body) + return resp, err +} + +// bodyTracker wraps a response.Body and invokes +// trace.EndSpan on encountering io.EOF on reading +// the body of the original response. +type bodyTracker struct { + rc io.ReadCloser + span *trace.Span +} + +var _ io.ReadCloser = (*bodyTracker)(nil) + +func (bt *bodyTracker) Read(b []byte) (int, error) { + n, err := bt.rc.Read(b) + + switch err { + case nil: + return n, nil + case io.EOF: + bt.span.End() + default: + // For all other errors, set the span status + bt.span.SetStatus(trace.Status{ + // Code 2 is the error code for Internal server error. + Code: 2, + Message: err.Error(), + }) + } + return n, err +} + +func (bt *bodyTracker) Close() error { + // Invoking endSpan on Close will help catch the cases + // in which a read returned a non-nil error, we set the + // span status but didn't end the span. 
+ bt.span.End() + return bt.rc.Close() +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *traceTransport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base.(canceler); ok { + cr.CancelRequest(req) + } +} + +func spanNameFromURL(req *http.Request) string { + return req.URL.Path +} + +func requestAttrs(r *http.Request) []trace.Attribute { + userAgent := r.UserAgent() + + attrs := make([]trace.Attribute, 0, 5) + attrs = append(attrs, + trace.StringAttribute(PathAttribute, r.URL.Path), + trace.StringAttribute(URLAttribute, r.URL.String()), + trace.StringAttribute(HostAttribute, r.Host), + trace.StringAttribute(MethodAttribute, r.Method), + ) + + if userAgent != "" { + attrs = append(attrs, trace.StringAttribute(UserAgentAttribute, userAgent)) + } + + return attrs +} + +func responseAttrs(resp *http.Response) []trace.Attribute { + return []trace.Attribute{ + trace.Int64Attribute(StatusCodeAttribute, int64(resp.StatusCode)), + } +} + +// TraceStatus is a utility to convert the HTTP status code to a trace.Status that +// represents the outcome as closely as possible. +func TraceStatus(httpStatusCode int, statusLine string) trace.Status { + var code int32 + if httpStatusCode < 200 || httpStatusCode >= 400 { + code = trace.StatusCodeUnknown + } + switch httpStatusCode { + case 499: + code = trace.StatusCodeCancelled + case http.StatusBadRequest: + code = trace.StatusCodeInvalidArgument + case http.StatusUnprocessableEntity: + code = trace.StatusCodeInvalidArgument + case http.StatusGatewayTimeout: + code = trace.StatusCodeDeadlineExceeded + case http.StatusNotFound: + code = trace.StatusCodeNotFound + case http.StatusForbidden: + code = trace.StatusCodePermissionDenied + case http.StatusUnauthorized: // 401 is actually unauthenticated. + code = trace.StatusCodeUnauthenticated + case http.StatusTooManyRequests: + code = trace.StatusCodeResourceExhausted + case http.StatusNotImplemented: + code = trace.StatusCodeUnimplemented + case http.StatusServiceUnavailable: + code = trace.StatusCodeUnavailable + case http.StatusOK: + code = trace.StatusCodeOK + case http.StatusConflict: + code = trace.StatusCodeAlreadyExists + } + + return trace.Status{Code: code, Message: codeToStr[code]} +} + +var codeToStr = map[int32]string{ + trace.StatusCodeOK: `OK`, + trace.StatusCodeCancelled: `CANCELLED`, + trace.StatusCodeUnknown: `UNKNOWN`, + trace.StatusCodeInvalidArgument: `INVALID_ARGUMENT`, + trace.StatusCodeDeadlineExceeded: `DEADLINE_EXCEEDED`, + trace.StatusCodeNotFound: `NOT_FOUND`, + trace.StatusCodeAlreadyExists: `ALREADY_EXISTS`, + trace.StatusCodePermissionDenied: `PERMISSION_DENIED`, + trace.StatusCodeResourceExhausted: `RESOURCE_EXHAUSTED`, + trace.StatusCodeFailedPrecondition: `FAILED_PRECONDITION`, + trace.StatusCodeAborted: `ABORTED`, + trace.StatusCodeOutOfRange: `OUT_OF_RANGE`, + trace.StatusCodeUnimplemented: `UNIMPLEMENTED`, + trace.StatusCodeInternal: `INTERNAL`, + trace.StatusCodeUnavailable: `UNAVAILABLE`, + trace.StatusCodeDataLoss: `DATA_LOSS`, + trace.StatusCodeUnauthenticated: `UNAUTHENTICATED`, +} + +func isHealthEndpoint(path string) bool { + // Health checking is pretty frequent and + // traces collected for health endpoints + // can be extremely noisy and expensive. + // Disable canonical health checking endpoints + // like /healthz and /_ah/health for now. 
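Editor's note: a tiny illustration of the TraceStatus mapping defined above; the package name is an assumption:

package statusdemo

import (
	"fmt"
	"net/http"

	"go.opencensus.io/plugin/ochttp"
	"go.opencensus.io/trace"
)

func demo() {
	s := ochttp.TraceStatus(http.StatusNotFound, http.StatusText(http.StatusNotFound))
	fmt.Println(s.Code == trace.StatusCodeNotFound, s.Message) // true NOT_FOUND
	// Any 4xx/5xx without an explicit case falls back to UNKNOWN.
	fmt.Println(ochttp.TraceStatus(http.StatusTeapot, "I'm a teapot").Code == trace.StatusCodeUnknown) // true
}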
+ if path == "/healthz" || path == "/_ah/health" { + return true + } + return false +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go b/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go new file mode 100644 index 0000000000000..7d75cae2b1853 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go @@ -0,0 +1,44 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "io" +) + +// wrappedBody returns a wrapped version of the original +// Body and only implements the same combination of additional +// interfaces as the original. +func wrappedBody(wrapper io.ReadCloser, body io.ReadCloser) io.ReadCloser { + var ( + wr, i0 = body.(io.Writer) + ) + switch { + case !i0: + return struct { + io.ReadCloser + }{wrapper} + + case i0: + return struct { + io.ReadCloser + io.Writer + }{wrapper, wr} + default: + return struct { + io.ReadCloser + }{wrapper} + } +} diff --git a/vendor/go.opencensus.io/stats/doc.go b/vendor/go.opencensus.io/stats/doc.go index 00d473ee02989..31477a464fd9a 100644 --- a/vendor/go.opencensus.io/stats/doc.go +++ b/vendor/go.opencensus.io/stats/doc.go @@ -19,7 +19,7 @@ Package stats contains support for OpenCensus stats recording. OpenCensus allows users to create typed measures, record measurements, aggregate the collected data, and export the aggregated data. -Measures +# Measures A measure represents a type of data point to be tracked and recorded. For example, latency, request Mb/s, and response Mb/s are measures @@ -33,7 +33,7 @@ Libraries can define and export measures. Application authors can then create views and collect and break down measures by the tags they are interested in. -Recording measurements +# Recording measurements Measurement is a data point to be collected for a measure. For example, for a latency (ms) measure, 100 is a measurement that represents a 100ms @@ -49,7 +49,7 @@ Libraries can always record measurements, and applications can later decide on which measurements they want to collect by registering views. This allows libraries to turn on the instrumentation by default. -Exemplars +# Exemplars For a given recorded measurement, the associated exemplar is a diagnostic map that gives more information about the measurement. @@ -64,6 +64,5 @@ then the trace span will be added to the exemplar associated with the measuremen When exported to a supporting back end, you should be able to easily navigate to example traces that fell into each bucket in the Distribution. - */ package stats // import "go.opencensus.io/stats" diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go index 36935e629b66c..436dc791f834c 100644 --- a/vendor/go.opencensus.io/stats/internal/record.go +++ b/vendor/go.opencensus.io/stats/internal/record.go @@ -21,5 +21,11 @@ import ( // DefaultRecorder will be called for each Record call. 
var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{}) +// MeasurementRecorder will be called for each Record call. This is the same as DefaultRecorder but +// avoids interface{} conversion. +// This will be a func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) type, +// but is interface{} here to avoid import loops +var MeasurementRecorder interface{} + // SubscriptionReporter reports when a view subscribed with a measure. var SubscriptionReporter func(measure string) diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go index 2b97283462e28..8b5b99803ce3e 100644 --- a/vendor/go.opencensus.io/stats/record.go +++ b/vendor/go.opencensus.io/stats/record.go @@ -86,10 +86,29 @@ func createRecordOption(ros ...Options) *recordOptions { return o } +type measurementRecorder = func(tags *tag.Map, measurement []Measurement, attachments map[string]interface{}) + // Record records one or multiple measurements with the same context at once. // If there are any tags in the context, measurements will be tagged with them. func Record(ctx context.Context, ms ...Measurement) { - RecordWithOptions(ctx, WithMeasurements(ms...)) + // Record behaves the same as RecordWithOptions, but because we do not have to handle generic functionality + // (RecordOptions) we can reduce some allocations to speed up this hot path + if len(ms) == 0 { + return + } + recorder := internal.MeasurementRecorder.(measurementRecorder) + record := false + for _, m := range ms { + if m.desc.subscribed() { + record = true + break + } + } + if !record { + return + } + recorder(tag.FromContext(ctx), ms, nil) + return } // RecordWithTags records one or multiple measurements at once. diff --git a/vendor/go.opencensus.io/stats/view/aggregation.go b/vendor/go.opencensus.io/stats/view/aggregation.go index 748bd568cda02..61f72d20da335 100644 --- a/vendor/go.opencensus.io/stats/view/aggregation.go +++ b/vendor/go.opencensus.io/stats/view/aggregation.go @@ -90,9 +90,9 @@ func Sum() *Aggregation { // // If len(bounds) >= 2 then the boundaries for bucket index i are: // -// [-infinity, bounds[i]) for i = 0 -// [bounds[i-1], bounds[i]) for 0 < i < length -// [bounds[i-1], +infinity) for i = length +// [-infinity, bounds[i]) for i = 0 +// [bounds[i-1], bounds[i]) for 0 < i < length +// [bounds[i-1], +infinity) for i = length // // If len(bounds) is 0 then there is no histogram associated with the // distribution. There will be a single bucket with boundaries diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go index ac22c93a2b572..bcd6e08c7481c 100644 --- a/vendor/go.opencensus.io/stats/view/collector.go +++ b/vendor/go.opencensus.io/stats/view/collector.go @@ -59,8 +59,15 @@ func (c *collector) clearRows() { // encodeWithKeys encodes the map by using values // only associated with the keys provided. 
func encodeWithKeys(m *tag.Map, keys []tag.Key) []byte { + // Compute the buffer length we will need ahead of time to avoid resizing later + reqLen := 0 + for _, k := range keys { + s, _ := m.Value(k) + // We will store each key + its length + reqLen += len(s) + 1 + } vb := &tagencoding.Values{ - Buffer: make([]byte, len(keys)), + Buffer: make([]byte, reqLen), } for _, k := range keys { v, _ := m.Value(k) diff --git a/vendor/go.opencensus.io/stats/view/doc.go b/vendor/go.opencensus.io/stats/view/doc.go index 7bbedfe1ff23a..60bf0e3925406 100644 --- a/vendor/go.opencensus.io/stats/view/doc.go +++ b/vendor/go.opencensus.io/stats/view/doc.go @@ -34,7 +34,7 @@ // Libraries can define views but it is recommended that in most cases registering // views be left up to applications. // -// Exporting +// # Exporting // // Collected and aggregated data can be exported to a metric collection // backend by registering its exporter. diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go index 6e8d18b7f6d3a..6a79cd8a34c30 100644 --- a/vendor/go.opencensus.io/stats/view/worker.go +++ b/vendor/go.opencensus.io/stats/view/worker.go @@ -33,6 +33,7 @@ func init() { defaultWorker = NewMeter().(*worker) go defaultWorker.start() internal.DefaultRecorder = record + internal.MeasurementRecorder = recordMeasurement } type measureRef struct { @@ -199,11 +200,21 @@ func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { defaultWorker.Record(tags, ms, attachments) } +func recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) { + defaultWorker.recordMeasurement(tags, ms, attachments) +} + // Record records a set of measurements ms associated with the given tags and attachments. func (w *worker) Record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { + w.recordMeasurement(tags, ms.([]stats.Measurement), attachments) +} + +// recordMeasurement records a set of measurements ms associated with the given tags and attachments. +// This is the same as Record but without an interface{} type to avoid allocations +func (w *worker) recordMeasurement(tags *tag.Map, ms []stats.Measurement, attachments map[string]interface{}) { req := &recordReq{ tm: tags, - ms: ms.([]stats.Measurement), + ms: ms, attachments: attachments, t: time.Now(), } @@ -221,6 +232,11 @@ func SetReportingPeriod(d time.Duration) { defaultWorker.SetReportingPeriod(d) } +// Stop stops the default worker. +func Stop() { + defaultWorker.Stop() +} + // SetReportingPeriod sets the interval between reporting aggregated views in // the program. If duration is less than or equal to zero, it enables the // default behavior. @@ -281,7 +297,7 @@ func (w *worker) start() { case <-w.quit: w.timer.Stop() close(w.c) - w.done <- true + close(w.done) return } } @@ -290,8 +306,11 @@ func (w *worker) start() { func (w *worker) Stop() { prodMgr := metricproducer.GlobalManager() prodMgr.DeleteProducer(w) - - w.quit <- true + select { + case <-w.quit: + default: + close(w.quit) + } <-w.done } diff --git a/vendor/go.opencensus.io/tag/profile_19.go b/vendor/go.opencensus.io/tag/profile_19.go index b34d95e34a2cf..8fb17226fe3c5 100644 --- a/vendor/go.opencensus.io/tag/profile_19.go +++ b/vendor/go.opencensus.io/tag/profile_19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
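
The `worker.Stop` change above swaps a blocking send on `quit` for a guarded `close`, and `start` now closes `done` instead of sending on it, which makes shutdown idempotent: repeated `Stop` calls neither panic nor block. A minimal standalone sketch of that pattern (this is not the OpenCensus worker itself; the type and names are illustrative):

```go
package main

import "fmt"

type loop struct {
	quit chan bool
	done chan bool
}

func newLoop() *loop {
	l := &loop{quit: make(chan bool), done: make(chan bool)}
	go l.run()
	return l
}

func (l *loop) run() {
	<-l.quit      // wait for the shutdown signal
	close(l.done) // closing (not sending) lets any number of waiters proceed
}

func (l *loop) Stop() {
	select {
	case <-l.quit: // already closed by an earlier Stop call
	default:
		close(l.quit)
	}
	<-l.done
}

func main() {
	l := newLoop()
	l.Stop()
	l.Stop() // safe: the second call returns immediately
	fmt.Println("stopped")
}
```

The `encodeWithKeys` hunk in the same file follows a related theme: it precomputes the exact buffer size so the tag encoder no longer reallocates while appending values.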
+//go:build go1.9 // +build go1.9 package tag diff --git a/vendor/go.opencensus.io/tag/profile_not19.go b/vendor/go.opencensus.io/tag/profile_not19.go index 83adbce56b72a..e28cf13cde97a 100644 --- a/vendor/go.opencensus.io/tag/profile_not19.go +++ b/vendor/go.opencensus.io/tag/profile_not19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.9 // +build !go1.9 package tag diff --git a/vendor/go.opencensus.io/trace/doc.go b/vendor/go.opencensus.io/trace/doc.go index 04b1ee4f38eab..7a1616a55c5e3 100644 --- a/vendor/go.opencensus.io/trace/doc.go +++ b/vendor/go.opencensus.io/trace/doc.go @@ -18,24 +18,23 @@ Package trace contains support for OpenCensus distributed tracing. The following assumes a basic familiarity with OpenCensus concepts. See http://opencensus.io - -Exporting Traces +# Exporting Traces To export collected tracing data, register at least one exporter. You can use one of the provided exporters or write your own. - trace.RegisterExporter(exporter) + trace.RegisterExporter(exporter) By default, traces will be sampled relatively rarely. To change the sampling frequency for your entire program, call ApplyConfig. Use a ProbabilitySampler to sample a subset of traces, or use AlwaysSample to collect a trace on every run: - trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) Be careful about using trace.AlwaysSample in a production application with significant traffic: a new trace will be started and exported for every request. -Adding Spans to a Trace +# Adding Spans to a Trace A trace consists of a tree of spans. In Go, the current span is carried in a context.Context. @@ -44,8 +43,8 @@ It is common to want to capture all the activity of a function call in a span. F this to work, the function must take a context.Context as a parameter. Add these two lines to the top of the function: - ctx, span := trace.StartSpan(ctx, "example.com/Run") - defer span.End() + ctx, span := trace.StartSpan(ctx, "example.com/Run") + defer span.End() StartSpan will create a new top-level span if the context doesn't contain another span, otherwise it will create a child span. diff --git a/vendor/go.opencensus.io/trace/lrumap.go b/vendor/go.opencensus.io/trace/lrumap.go index 908c2497ed5b7..80095a5f6c03e 100644 --- a/vendor/go.opencensus.io/trace/lrumap.go +++ b/vendor/go.opencensus.io/trace/lrumap.go @@ -44,7 +44,7 @@ func (lm lruMap) len() int { } func (lm lruMap) keys() []interface{} { - keys := make([]interface{}, len(lm.cacheKeys)) + keys := make([]interface{}, 0, len(lm.cacheKeys)) for k := range lm.cacheKeys { keys = append(keys, k) } diff --git a/vendor/go.opencensus.io/trace/trace_go11.go b/vendor/go.opencensus.io/trace/trace_go11.go index b7d8aaf284778..b8fc1e495a9c9 100644 --- a/vendor/go.opencensus.io/trace/trace_go11.go +++ b/vendor/go.opencensus.io/trace/trace_go11.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.11 // +build go1.11 package trace diff --git a/vendor/go.opencensus.io/trace/trace_nongo11.go b/vendor/go.opencensus.io/trace/trace_nongo11.go index e25419859c02a..da488fc874014 100644 --- a/vendor/go.opencensus.io/trace/trace_nongo11.go +++ b/vendor/go.opencensus.io/trace/trace_nongo11.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
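
The one-character change to `lruMap.keys` above fixes a classic Go slip: `make([]T, n)` allocates *length* n, so subsequent `append` calls grow the slice past the pre-filled zero values instead of replacing them. A self-contained sketch of the difference (names are illustrative only):

```go
package main

import "fmt"

func main() {
	src := []string{"a", "b"}

	// Buggy: length-2 slice of nils, append grows it to length 4.
	bad := make([]interface{}, len(src))
	for _, s := range src {
		bad = append(bad, s)
	}
	fmt.Println(bad) // [<nil> <nil> a b]

	// Fixed: zero length, capacity 2, so append fills exactly the keys.
	good := make([]interface{}, 0, len(src))
	for _, s := range src {
		good = append(good, s)
	}
	fmt.Println(good) // [a b]
}
```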
+//go:build !go1.11 // +build !go1.11 package trace diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/grpctrace.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go similarity index 56% rename from vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/grpctrace.go rename to vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go index aaeb19e30d013..369ebae24a571 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/grpctrace.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go @@ -12,17 +12,18 @@ // See the License for the specific language governing permissions and // limitations under the License. -package otelgrpc +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" import ( - "context" - - "google.golang.org/grpc/metadata" - "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/baggage" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/global" + "go.opentelemetry.io/otel/metric/instrument" + "go.opentelemetry.io/otel/metric/instrument/syncint64" + "go.opentelemetry.io/otel/metric/unit" "go.opentelemetry.io/otel/propagation" + semconv "go.opentelemetry.io/otel/semconv/v1.12.0" "go.opentelemetry.io/otel/trace" ) @@ -33,10 +34,20 @@ const ( GRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") ) +// Filter is a predicate used to determine whether a given request in +// interceptor info should be traced. A Filter must return true if +// the request should be traced. +type Filter func(*InterceptorInfo) bool + // config is a group of options for this instrumentation. type config struct { + Filter Filter Propagators propagation.TextMapPropagator TracerProvider trace.TracerProvider + MeterProvider metric.MeterProvider + + meter metric.Meter + rpcServerDuration syncint64.Histogram } // Option applies an option value for a config. @@ -49,10 +60,22 @@ func newConfig(opts []Option) *config { c := &config{ Propagators: otel.GetTextMapPropagator(), TracerProvider: otel.GetTracerProvider(), + MeterProvider: global.MeterProvider(), } for _, o := range opts { o.apply(c) } + + c.meter = c.MeterProvider.Meter( + instrumentationName, + metric.WithInstrumentationVersion(SemVersion()), + metric.WithSchemaURL(semconv.SchemaURL), + ) + var err error + if c.rpcServerDuration, err = c.meter.SyncInt64().Histogram("rpc.server.duration", instrument.WithUnit(unit.Milliseconds)); err != nil { + otel.Handle(err) + } + return c } @@ -78,57 +101,37 @@ func (o tracerProviderOption) apply(c *config) { } } -// WithTracerProvider returns an Option to use the TracerProvider when -// creating a Tracer. -func WithTracerProvider(tp trace.TracerProvider) Option { - return tracerProviderOption{tp: tp} +// WithInterceptorFilter returns an Option to use the request filter. 
+func WithInterceptorFilter(f Filter) Option { + return interceptorFilterOption{f: f} } -type metadataSupplier struct { - metadata *metadata.MD +type interceptorFilterOption struct { + f Filter } -// assert that metadataSupplier implements the TextMapCarrier interface -var _ propagation.TextMapCarrier = &metadataSupplier{} - -func (s *metadataSupplier) Get(key string) string { - values := s.metadata.Get(key) - if len(values) == 0 { - return "" +func (o interceptorFilterOption) apply(c *config) { + if o.f != nil { + c.Filter = o.f } - return values[0] } -func (s *metadataSupplier) Set(key string, value string) { - s.metadata.Set(key, value) +// WithTracerProvider returns an Option to use the TracerProvider when +// creating a Tracer. +func WithTracerProvider(tp trace.TracerProvider) Option { + return tracerProviderOption{tp: tp} } -func (s *metadataSupplier) Keys() []string { - out := make([]string, 0, len(*s.metadata)) - for key := range *s.metadata { - out = append(out, key) - } - return out -} +type meterProviderOption struct{ mp metric.MeterProvider } -// Inject injects correlation context and span context into the gRPC -// metadata object. This function is meant to be used on outgoing -// requests. -func Inject(ctx context.Context, metadata *metadata.MD, opts ...Option) { - c := newConfig(opts) - c.Propagators.Inject(ctx, &metadataSupplier{ - metadata: metadata, - }) +func (o meterProviderOption) apply(c *config) { + if o.mp != nil { + c.MeterProvider = o.mp + } } -// Extract returns the correlation context and span context that -// another service encoded in the gRPC metadata object with Inject. -// This function is meant to be used on incoming requests. -func Extract(ctx context.Context, metadata *metadata.MD, opts ...Option) (baggage.Baggage, trace.SpanContext) { - c := newConfig(opts) - ctx = c.Propagators.Extract(ctx, &metadataSupplier{ - metadata: metadata, - }) - - return baggage.FromContext(ctx), trace.SpanContextFromContext(ctx) +// WithMeterProvider returns an Option to use the MeterProvider when +// creating a Meter. If this option is not provide the global MeterProvider will be used. +func WithMeterProvider(mp metric.MeterProvider) Option { + return meterProviderOption{mp: mp} } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go index b1d9f643d84bc..aeee1d18dbdae 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
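
With the reworked `config` above, the interceptors can skip instrumentation per request via a `Filter` and publish an `rpc.server.duration` histogram through a configurable `MeterProvider`. A hedged usage sketch of wiring these options into a gRPC server; the health-check method name is an assumption made for illustration, not something defined in this patch:

```go
package main

import (
	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"google.golang.org/grpc"
)

// healthCheckMethod is an assumed full method name, used only for this example.
const healthCheckMethod = "/grpc.health.v1.Health/Check"

func main() {
	// Skip tracing and the rpc.server.duration metric for health checks.
	filter := func(i *otelgrpc.InterceptorInfo) bool {
		if i.UnaryServerInfo != nil && i.UnaryServerInfo.FullMethod == healthCheckMethod {
			return false
		}
		return true
	}

	srv := grpc.NewServer(
		grpc.UnaryInterceptor(otelgrpc.UnaryServerInterceptor(
			otelgrpc.WithInterceptorFilter(filter),
			// WithTracerProvider / WithMeterProvider default to the globals;
			// pass explicit providers here to route spans and the
			// rpc.server.duration histogram elsewhere.
		)),
		grpc.StreamInterceptor(otelgrpc.StreamServerInterceptor(
			otelgrpc.WithInterceptorFilter(filter),
		)),
	)
	_ = srv // srv.Serve(listener) would follow in a real server
}
```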
-package otelgrpc +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" // gRPC tracing middleware // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/rpc.md @@ -20,20 +20,19 @@ import ( "context" "io" "net" - - "github.com/golang/protobuf/proto" // nolint:staticcheck + "time" "google.golang.org/grpc" grpc_codes "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/baggage" "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + semconv "go.opentelemetry.io/otel/semconv/v1.12.0" "go.opentelemetry.io/otel/trace" ) @@ -43,6 +42,9 @@ type messageType attribute.KeyValue // passed context with id and size (if message is a proto message). func (m messageType) Event(ctx context.Context, id int, message interface{}) { span := trace.SpanFromContext(ctx) + if !span.IsRecording() { + return + } if p, ok := message.(proto.Message); ok { span.AddEvent("message", trace.WithAttributes( attribute.KeyValue(m), @@ -65,6 +67,12 @@ var ( // UnaryClientInterceptor returns a grpc.UnaryClientInterceptor suitable // for use in a grpc.Dial call. func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor { + cfg := newConfig(opts) + tracer := cfg.TracerProvider.Tracer( + instrumentationName, + trace.WithInstrumentationVersion(SemVersion()), + ) + return func( ctx context.Context, method string, @@ -73,13 +81,13 @@ func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor { invoker grpc.UnaryInvoker, callOpts ...grpc.CallOption, ) error { - requestMetadata, _ := metadata.FromOutgoingContext(ctx) - metadataCopy := requestMetadata.Copy() - - tracer := newConfig(opts).TracerProvider.Tracer( - instrumentationName, - trace.WithInstrumentationVersion(SemVersion()), - ) + i := &InterceptorInfo{ + Method: method, + Type: UnaryClient, + } + if cfg.Filter != nil && !cfg.Filter(i) { + return invoker(ctx, method, req, reply, cc, callOpts...) + } name, attr := spanInfo(method, cc.Target()) var span trace.Span @@ -91,8 +99,7 @@ func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor { ) defer span.End() - Inject(ctx, &metadataCopy, opts...) - ctx = metadata.NewOutgoingContext(ctx, metadataCopy) + ctx = inject(ctx, cfg.Propagators) messageSent.Event(ctx, 1, req) @@ -235,6 +242,12 @@ func (w *clientStream) sendStreamEvent(eventType streamEventType, err error) { // StreamClientInterceptor returns a grpc.StreamClientInterceptor suitable // for use in a grpc.Dial call. 
func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { + cfg := newConfig(opts) + tracer := cfg.TracerProvider.Tracer( + instrumentationName, + trace.WithInstrumentationVersion(SemVersion()), + ) + return func( ctx context.Context, desc *grpc.StreamDesc, @@ -243,13 +256,13 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { streamer grpc.Streamer, callOpts ...grpc.CallOption, ) (grpc.ClientStream, error) { - requestMetadata, _ := metadata.FromOutgoingContext(ctx) - metadataCopy := requestMetadata.Copy() - - tracer := newConfig(opts).TracerProvider.Tracer( - instrumentationName, - trace.WithInstrumentationVersion(SemVersion()), - ) + i := &InterceptorInfo{ + Method: method, + Type: StreamClient, + } + if cfg.Filter != nil && !cfg.Filter(i) { + return streamer(ctx, desc, cc, method, callOpts...) + } name, attr := spanInfo(method, cc.Target()) var span trace.Span @@ -260,8 +273,7 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { trace.WithAttributes(attr...), ) - Inject(ctx, &metadataCopy, opts...) - ctx = metadata.NewOutgoingContext(ctx, metadataCopy) + ctx = inject(ctx, cfg.Propagators) s, err := streamer(ctx, desc, cc, method, callOpts...) if err != nil { @@ -294,26 +306,31 @@ func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { // UnaryServerInterceptor returns a grpc.UnaryServerInterceptor suitable // for use in a grpc.NewServer call. func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { + cfg := newConfig(opts) + tracer := cfg.TracerProvider.Tracer( + instrumentationName, + trace.WithInstrumentationVersion(SemVersion()), + ) + return func( ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler, ) (interface{}, error) { - requestMetadata, _ := metadata.FromIncomingContext(ctx) - metadataCopy := requestMetadata.Copy() - - bags, spanCtx := Extract(ctx, &metadataCopy, opts...) - ctx = baggage.ContextWithBaggage(ctx, bags) + i := &InterceptorInfo{ + UnaryServerInfo: info, + Type: UnaryServer, + } + if cfg.Filter != nil && !cfg.Filter(i) { + return handler(ctx, req) + } - tracer := newConfig(opts).TracerProvider.Tracer( - instrumentationName, - trace.WithInstrumentationVersion(SemVersion()), - ) + ctx = extract(ctx, cfg.Propagators) name, attr := spanInfo(info.FullMethod, peerFromCtx(ctx)) ctx, span := tracer.Start( - trace.ContextWithRemoteSpanContext(ctx, spanCtx), + trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), name, trace.WithSpanKind(trace.SpanKindServer), trace.WithAttributes(attr...), @@ -322,13 +339,22 @@ func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { messageReceived.Event(ctx, 1, req) + var statusCode grpc_codes.Code + defer func(t time.Time) { + elapsedTime := time.Since(t) / time.Millisecond + attr = append(attr, semconv.RPCGRPCStatusCodeKey.Int64(int64(statusCode))) + cfg.rpcServerDuration.Record(ctx, int64(elapsedTime), attr...) 
+ }(time.Now()) + resp, err := handler(ctx, req) if err != nil { s, _ := status.FromError(err) + statusCode = s.Code() span.SetStatus(codes.Error, s.Message()) span.SetAttributes(statusCodeAttr(s.Code())) messageSent.Event(ctx, 1, s.Proto()) } else { + statusCode = grpc_codes.OK span.SetAttributes(statusCodeAttr(grpc_codes.OK)) messageSent.Event(ctx, 1, resp) } @@ -381,6 +407,12 @@ func wrapServerStream(ctx context.Context, ss grpc.ServerStream) *serverStream { // StreamServerInterceptor returns a grpc.StreamServerInterceptor suitable // for use in a grpc.NewServer call. func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { + cfg := newConfig(opts) + tracer := cfg.TracerProvider.Tracer( + instrumentationName, + trace.WithInstrumentationVersion(SemVersion()), + ) + return func( srv interface{}, ss grpc.ServerStream, @@ -388,21 +420,19 @@ func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { handler grpc.StreamHandler, ) error { ctx := ss.Context() + i := &InterceptorInfo{ + StreamServerInfo: info, + Type: StreamServer, + } + if cfg.Filter != nil && !cfg.Filter(i) { + return handler(srv, wrapServerStream(ctx, ss)) + } - requestMetadata, _ := metadata.FromIncomingContext(ctx) - metadataCopy := requestMetadata.Copy() - - bags, spanCtx := Extract(ctx, &metadataCopy, opts...) - ctx = baggage.ContextWithBaggage(ctx, bags) - - tracer := newConfig(opts).TracerProvider.Tracer( - instrumentationName, - trace.WithInstrumentationVersion(SemVersion()), - ) + ctx = extract(ctx, cfg.Propagators) name, attr := spanInfo(info.FullMethod, peerFromCtx(ctx)) ctx, span := tracer.Start( - trace.ContextWithRemoteSpanContext(ctx, spanCtx), + trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), name, trace.WithSpanKind(trace.SpanKindServer), trace.WithAttributes(attr...), @@ -410,7 +440,6 @@ func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { defer span.End() err := handler(srv, wrapServerStream(ctx, ss)) - if err != nil { s, _ := status.FromError(err) span.SetStatus(codes.Error, s.Message()) @@ -459,7 +488,7 @@ func peerFromCtx(ctx context.Context) string { return p.Addr.String() } -// statusCodeAttr returns status code attribute based on given gRPC code +// statusCodeAttr returns status code attribute based on given gRPC code. func statusCodeAttr(c grpc_codes.Code) attribute.KeyValue { return GRPCStatusCodeKey.Int64(int64(c)) } diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go new file mode 100644 index 0000000000000..f6116946bfd85 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go @@ -0,0 +1,50 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
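
The duration recording added to `UnaryServerInterceptor` relies on a small but easy-to-misread idiom: the deferred closure takes `time.Now()` as an argument, so the start time is captured when the `defer` statement executes, while `time.Since` runs only after the handler returns. A standalone sketch of just that pattern (`recordMillis` is a stand-in for the histogram call, not the otel API):

```go
package main

import (
	"fmt"
	"time"
)

// recordMillis stands in for cfg.rpcServerDuration.Record in the diff above.
func recordMillis(ms int64) { fmt.Printf("rpc.server.duration: %dms\n", ms) }

func handle() {
	// time.Now() is evaluated here, at defer registration; time.Since runs at return.
	defer func(start time.Time) {
		recordMillis(int64(time.Since(start) / time.Millisecond))
	}(time.Now())

	time.Sleep(25 * time.Millisecond) // simulated handler work
}

func main() { handle() }
```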
+ +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + +import ( + "google.golang.org/grpc" +) + +// InterceptorType is the flag to define which gRPC interceptor +// the InterceptorInfo object is. +type InterceptorType uint8 + +const ( + // UndefinedInterceptor is the type for the interceptor information that is not + // well initialized or categorized to other types. + UndefinedInterceptor InterceptorType = iota + // UnaryClient is the type for grpc.UnaryClient interceptor. + UnaryClient + // StreamClient is the type for grpc.StreamClient interceptor. + StreamClient + // UnaryServer is the type for grpc.UnaryServer interceptor. + UnaryServer + // StreamServer is the type for grpc.StreamServer interceptor. + StreamServer +) + +// InterceptorInfo is the union of some arguments to four types of +// gRPC interceptors. +type InterceptorInfo struct { + // Method is method name registered to UnaryClient and StreamClient + Method string + // UnaryServerInfo is the metadata for UnaryServer + UnaryServerInfo *grpc.UnaryServerInfo + // StreamServerInfo if the metadata for StreamServer + StreamServerInfo *grpc.StreamServerInfo + // Type is the type for interceptor + Type InterceptorType +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go index 56a682102bb0b..bc214d363a24a 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go @@ -12,13 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -package internal +package internal // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" import ( "strings" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + semconv "go.opentelemetry.io/otel/semconv/v1.12.0" ) // ParseFullMethod returns a span name following the OpenTelemetry semantic diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go new file mode 100644 index 0000000000000..d91c6df2370e8 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go @@ -0,0 +1,98 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
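
`InterceptorInfo` is what a `Filter` receives: only `Method` and `Type` are populated on the client side, while the server interceptors fill in the `*ServerInfo` fields instead (see the interceptor changes above). A hedged client-side sketch that drops instrumentation for reflection calls; the dial target and the method prefix are assumptions made for illustration:

```go
package main

import (
	"strings"

	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Trace everything except the reflection service.
	filter := func(i *otelgrpc.InterceptorInfo) bool {
		switch i.Type {
		case otelgrpc.UnaryClient, otelgrpc.StreamClient:
			return !strings.HasPrefix(i.Method, "/grpc.reflection.")
		default:
			return true
		}
	}

	conn, err := grpc.Dial("localhost:50051", // assumed address
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithUnaryInterceptor(otelgrpc.UnaryClientInterceptor(otelgrpc.WithInterceptorFilter(filter))),
		grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor(otelgrpc.WithInterceptorFilter(filter))),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```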
+ +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + +import ( + "context" + + "google.golang.org/grpc/metadata" + + "go.opentelemetry.io/otel/baggage" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +type metadataSupplier struct { + metadata *metadata.MD +} + +// assert that metadataSupplier implements the TextMapCarrier interface. +var _ propagation.TextMapCarrier = &metadataSupplier{} + +func (s *metadataSupplier) Get(key string) string { + values := s.metadata.Get(key) + if len(values) == 0 { + return "" + } + return values[0] +} + +func (s *metadataSupplier) Set(key string, value string) { + s.metadata.Set(key, value) +} + +func (s *metadataSupplier) Keys() []string { + out := make([]string, 0, len(*s.metadata)) + for key := range *s.metadata { + out = append(out, key) + } + return out +} + +// Inject injects correlation context and span context into the gRPC +// metadata object. This function is meant to be used on outgoing +// requests. +// Deprecated: Unnecessary public func. +func Inject(ctx context.Context, md *metadata.MD, opts ...Option) { + c := newConfig(opts) + c.Propagators.Inject(ctx, &metadataSupplier{ + metadata: md, + }) +} + +func inject(ctx context.Context, propagators propagation.TextMapPropagator) context.Context { + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { + md = metadata.MD{} + } + propagators.Inject(ctx, &metadataSupplier{ + metadata: &md, + }) + return metadata.NewOutgoingContext(ctx, md) +} + +// Extract returns the correlation context and span context that +// another service encoded in the gRPC metadata object with Inject. +// This function is meant to be used on incoming requests. +// Deprecated: Unnecessary public func. +func Extract(ctx context.Context, md *metadata.MD, opts ...Option) (baggage.Baggage, trace.SpanContext) { + c := newConfig(opts) + ctx = c.Propagators.Extract(ctx, &metadataSupplier{ + metadata: md, + }) + + return baggage.FromContext(ctx), trace.SpanContextFromContext(ctx) +} + +func extract(ctx context.Context, propagators propagation.TextMapPropagator) context.Context { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + md = metadata.MD{} + } + + return propagators.Extract(ctx, &metadataSupplier{ + metadata: &md, + }) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go index 73f14a458e19a..611c7f3017a72 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go @@ -12,11 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -package otelgrpc +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" import ( "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + semconv "go.opentelemetry.io/otel/semconv/v1.12.0" ) // Semantic conventions for attribute keys for gRPC. 
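
The new unexported `inject`/`extract` helpers do the metadata plumbing that callers previously had to perform themselves through the now-deprecated `Inject`/`Extract`. A standalone sketch of the same round trip using otel's baggage propagator and a locally defined carrier (the carrier and names below are local to this example, not part of the package):

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/baggage"
	"go.opentelemetry.io/otel/propagation"
	"google.golang.org/grpc/metadata"
)

// mdCarrier adapts gRPC metadata to otel's TextMapCarrier, mirroring the
// unexported metadataSupplier above.
type mdCarrier metadata.MD

func (c mdCarrier) Get(key string) string {
	if vs := metadata.MD(c).Get(key); len(vs) > 0 {
		return vs[0]
	}
	return ""
}
func (c mdCarrier) Set(key, value string) { metadata.MD(c).Set(key, value) }
func (c mdCarrier) Keys() []string {
	out := make([]string, 0, len(c))
	for k := range c {
		out = append(out, k)
	}
	return out
}

func main() {
	prop := propagation.Baggage{} // TraceContext{} works the same way

	// Client side: put some baggage in the context and inject it into metadata.
	member, _ := baggage.NewMember("tenant", "acme") // errors ignored for brevity
	bag, _ := baggage.New(member)
	clientCtx := baggage.ContextWithBaggage(context.Background(), bag)

	md := metadata.MD{}
	prop.Inject(clientCtx, mdCarrier(md))

	// Server side: pretend md arrived as incoming metadata and extract it.
	serverCtx := metadata.NewIncomingContext(context.Background(), md)
	in, _ := metadata.FromIncomingContext(serverCtx)
	extracted := prop.Extract(context.Background(), mdCarrier(in))

	fmt.Println(baggage.FromContext(extracted).Member("tenant").Value()) // acme
}
```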
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go index f7c0e090794db..3c99ca80a26b6 100644 --- a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -12,11 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -package otelgrpc +package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" // Version is the current release version of the gRPC instrumentation. func Version() string { - return "0.29.0" + return "0.37.0" // This string is updated by the pre_release.sh script during release } diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore index 759cf53e0023e..0b605b3d67d4d 100644 --- a/vendor/go.opentelemetry.io/otel/.gitignore +++ b/vendor/go.opentelemetry.io/otel/.gitignore @@ -11,11 +11,11 @@ coverage.* gen/ /example/fib/fib +/example/fib/traces.txt /example/jaeger/jaeger /example/namedtracer/namedtracer /example/opencensus/opencensus /example/passthrough/passthrough /example/prometheus/prometheus -/example/prom-collector/prom-collector /example/zipkin/zipkin /example/otel-collector/otel-collector diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml index 7a5fdc07abf59..0f099f5759592 100644 --- a/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -9,8 +9,9 @@ linters: disable-all: true # Specifically enable linters we want to use. enable: - - deadcode + - depguard - errcheck + - godot - gofmt - goimports - gosimple @@ -19,29 +20,225 @@ linters: - misspell - revive - staticcheck - - structcheck - typecheck - unused - - varcheck - issues: + # Maximum issues count per one linter. + # Set to 0 to disable. + # Default: 50 + # Setting to unlimited so the linter only is run once to debug all issues. + max-issues-per-linter: 0 + # Maximum count of issues with the same text. + # Set to 0 to disable. + # Default: 3 + # Setting to unlimited so the linter only is run once to debug all issues. + max-same-issues: 0 + # Excluding configuration per-path, per-linter, per-text and per-source. exclude-rules: - # helpers in tests often (rightfully) pass a *testing.T as their first argument - - path: _test\.go - text: "context.Context should be the first parameter of a function" + # TODO: Having appropriate comments for exported objects helps development, + # even for objects in internal packages. Appropriate comments for all + # exported objects should be added and this exclusion removed. + - path: '.*internal/.*' + text: "exported (method|function|type|const) (.+) should have comment or be unexported" linters: - revive - # Yes, they are, but it's okay in a test + # Yes, they are, but it's okay in a test. - path: _test\.go text: "exported func.*returns unexported type.*which can be annoying to use" linters: - revive + # Example test functions should be treated like main. + - path: example.*_test\.go + text: "calls to (.+) only in main[(][)] or init[(][)] functions" + linters: + - revive + include: + # revive exported should have comment or be unexported. + - EXC0012 + # revive package comment should be of the form ... 
+ - EXC0013 linters-settings: + depguard: + # Check the list against standard lib. + # Default: false + include-go-root: true + # A list of packages for the list type specified. + # Default: [] + packages: + - "crypto/md5" + - "crypto/sha1" + - "crypto/**/pkix" + ignore-file-rules: + - "**/*_test.go" + additional-guards: + # Do not allow testing packages in non-test files. + - list-type: denylist + include-go-root: true + packages: + - testing + - github.com/stretchr/testify + ignore-file-rules: + - "**/*_test.go" + - "**/*test/*.go" + - "**/internal/matchers/*.go" + godot: + exclude: + # Exclude sentence fragments for lists. + - '^[ ]*[-•]' + # Exclude sentences prefixing a list. + - ':$' + goimports: + local-prefixes: go.opentelemetry.io misspell: locale: US ignore-words: - cancelled - goimports: - local-prefixes: go.opentelemetry.io + revive: + # Sets the default failure confidence. + # This means that linting errors with less than 0.8 confidence will be ignored. + # Default: 0.8 + confidence: 0.01 + rules: + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports + - name: blank-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr + - name: bool-literal-in-expr + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr + - name: constant-logical-expr + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument + # TODO (#3372) reenable linter when it is compatible. https://github.com/golangci/golangci-lint/issues/3280 + - name: context-as-argument + disabled: true + arguments: + allowTypesBefore: "*testing.T" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type + - name: context-keys-type + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit + - name: deep-exit + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer + - name: defer + disabled: false + arguments: + - ["call-chain", "loop"] + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports + - name: dot-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports + - name: duplicated-imports + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return + - name: early-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block + - name: empty-block + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines + - name: empty-lines + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming + - name: error-naming + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return + - name: error-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings + - name: error-strings + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf + - name: errorf + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported + - name: exported + disabled: false + arguments: + - "sayRepetitiveInsteadOfStutters" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter + - name: flag-parameter + 
disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches + - name: identical-branches + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return + - name: if-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement + - name: increment-decrement + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow + - name: indent-error-flow + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing + - name: import-shadowing + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments + - name: package-comments + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range + - name: range + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure + - name: range-val-in-closure + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address + - name: range-val-address + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id + - name: redefines-builtin-id + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format + - name: string-format + disabled: false + arguments: + - - panic + - '/^[^\n]*$/' + - must not contain line breaks + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag + - name: struct-tag + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else + - name: superfluous-else + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal + - name: time-equal + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming + - name: var-naming + disabled: false + arguments: + - ["ID"] # AllowList + - ["Otel", "Aws", "Gcp"] # DenyList + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration + - name: var-declaration + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion + - name: unconditional-recursion + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return + - name: unexported-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error + - name: unhandled-error + disabled: false + arguments: + - "fmt.Fprint" + - "fmt.Fprintf" + - "fmt.Fprintln" + - "fmt.Print" + - "fmt.Printf" + - "fmt.Println" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt + - name: unnecessary-stmt + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break + - name: useless-break + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value + - name: waitgroup-by-value + disabled: false diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore new file mode 100644 index 0000000000000..32e4812755eef --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/.lycheeignore @@ -0,0 +1,4 @@ +http://localhost +http://jaeger-collector +https://github.com/open-telemetry/opentelemetry-go/milestone/ 
+https://github.com/open-telemetry/opentelemetry-go/projects diff --git a/vendor/go.opentelemetry.io/otel/.markdown-link.json b/vendor/go.opentelemetry.io/otel/.markdown-link.json deleted file mode 100644 index f222ad89c310c..0000000000000 --- a/vendor/go.opentelemetry.io/otel/.markdown-link.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "ignorePatterns": [ - { - "pattern": "^http(s)?://localhost" - } - ], - "replacementPatterns": [ - { - "pattern": "^/registry", - "replacement": "https://opentelemetry.io/registry" - } - ], - "retryOn429": true, - "retryCount": 5, - "fallbackRetryDelay": "30s" -} diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md index 42dada4905bc8..2bfe0a9411baa 100644 --- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,537 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] +## [1.12.0/0.35.0] 2023-01-28 + +### Added + +- The `WithInt64Callback` option to `go.opentelemetry.io/otel/metric/instrument`. + This options is used to configure `int64` Observer callbacks during their creation. (#3507) +- The `WithFloat64Callback` option to `go.opentelemetry.io/otel/metric/instrument`. + This options is used to configure `float64` Observer callbacks during their creation. (#3507) +- The `Producer` interface and `Reader.RegisterProducer(Producer)` to `go.opentelemetry.io/otel/sdk/metric`. + These additions are used to enable external metric Producers. (#3524) +- The `Callback` function type to `go.opentelemetry.io/otel/metric`. + This new named function type is registered with a `Meter`. (#3564) +- The `go.opentelemetry.io/otel/semconv/v1.13.0` package. + The package contains semantic conventions from the `v1.13.0` version of the OpenTelemetry specification. (#3499) + - The `EndUserAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientRequest` and `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `HTTPAttributesFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientResponse` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `HTTPClientAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `HTTPServerAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `HTTPServerMetricAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `NetAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `Transport` in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` and `ClientRequest` or `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `SpanStatusFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. + - The `SpanStatusFromHTTPStatusCodeAndSpanKind` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `ClientStatus` and `ServerStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`. 
+ - The `Client` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Conn`. + - The `Server` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Listener`. +- The `go.opentelemetry.io/otel/semconv/v1.14.0` package. + The package contains semantic conventions from the `v1.14.0` version of the OpenTelemetry specification. (#3566) +- The `go.opentelemetry.io/otel/semconv/v1.15.0` package. + The package contains semantic conventions from the `v1.15.0` version of the OpenTelemetry specification. (#3578) +- The `go.opentelemetry.io/otel/semconv/v1.16.0` package. + The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579) +- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`. + These instruments are use as replacements of the depreacted `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages.(#3575, #3586) + - `Float64ObservableCounter` replaces the `asyncfloat64.Counter` + - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter` + - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge` + - `Int64ObservableCounter` replaces the `asyncint64.Counter` + - `Int64ObservableUpDownCounter` replaces the `asyncint64.UpDownCounter` + - `Int64ObservableGauge` replaces the `asyncint64.Gauge` + - `Float64Counter` replaces the `syncfloat64.Counter` + - `Float64UpDownCounter` replaces the `syncfloat64.UpDownCounter` + - `Float64Histogram` replaces the `syncfloat64.Histogram` + - `Int64Counter` replaces the `syncint64.Counter` + - `Int64UpDownCounter` replaces the `syncint64.UpDownCounter` + - `Int64Histogram` replaces the `syncint64.Histogram` +- `NewTracerProvider` to `go.opentelemetry.io/otel/bridge/opentracing`. + This is used to create `WrapperTracer` instances from a `TracerProvider`. (#3116) +- The `Extrema` type to `go.opentelemetry.io/otel/sdk/metric/metricdata`. + This type is used to represent min/max values and still be able to distinguish unset and zero values. (#3487) +- The `go.opentelemetry.io/otel/semconv/v1.17.0` package. + The package contains semantic conventions from the `v1.17.0` version of the OpenTelemetry specification. (#3599) + +### Changed + +- Jaeger and Zipkin exporter use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500) +- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and confguration based on the instrument type. (#3507) + - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`. + - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`. + - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`. + - Use the added `Float64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncfloat64`. +- Return a `Registration` from the `RegisterCallback` method of a `Meter` in the `go.opentelemetry.io/otel/metric` package. + This `Registration` can be used to unregister callbacks. (#3522) +- Global error handler uses an atomic value instead of a mutex. 
(#3543) +- Add `NewMetricProducer` to `go.opentelemetry.io/otel/bridge/opencensus`, which can be used to pass OpenCensus metrics to an OpenTelemetry Reader. (#3541) +- Global logger uses an atomic value instead of a mutex. (#3545) +- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. (#3551) +- The `Sampler` returned from `TraceIDRatioBased` `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions. + This fixes random sampling when using ID generators like `xray.IDGenerator` and increasing parity with other language implementations. (#3557) +- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in erros identifying their signal name. + Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. (#3516) +- Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514) +- The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. (#3562) + - `InstrumentKindSyncCounter` is renamed to `InstrumentKindCounter` + - `InstrumentKindSyncUpDownCounter` is renamed to `InstrumentKindUpDownCounter` + - `InstrumentKindSyncHistogram` is renamed to `InstrumentKindHistogram` + - `InstrumentKindAsyncCounter` is renamed to `InstrumentKindObservableCounter` + - `InstrumentKindAsyncUpDownCounter` is renamed to `InstrumentKindObservableUpDownCounter` + - `InstrumentKindAsyncGauge` is renamed to `InstrumentKindObservableGauge` +- The `RegisterCallback` method of the `Meter` in `go.opentelemetry.io/otel/metric` changed. + - The named `Callback` replaces the inline function parameter. (#3564) + - `Callback` is required to return an error. (#3576) + - `Callback` accepts the added `Observer` parameter added. + This new parameter is used by `Callback` implementations to observe values for asynchronous instruments instead of calling the `Observe` method of the instrument directly. (#3584) + - The slice of `instrument.Asynchronous` is now passed as a variadic argument. (#3587) +- The exporter from `go.opentelemetry.io/otel/exporters/zipkin` is updated to use the `v1.16.0` version of semantic conventions. + This means it no longer uses the removed `net.peer.ip` or `http.host` attributes to determine the remote endpoint. + Instead it uses the `net.sock.peer` attributes. (#3581) +- The `Min` and `Max` fields of the `HistogramDataPoint` in `go.opentelemetry.io/otel/sdk/metric/metricdata` are now defined with the added `Extrema` type instead of a `*float64`. (#3487) + +### Fixed + +- Asynchronous instruments that use sum aggregators and attribute filters correctly add values from equivalent attribute sets that have been filtered. (#3439, #3549) +- The `RegisterCallback` method of the `Meter` from `go.opentelemetry.io/otel/sdk/metric` only registers a callback for instruments created by that meter. + Trying to register a callback with instruments from a different meter will result in an error being returned. (#3584) + +### Deprecated + +- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` is deprecated. + Use `NewMetricProducer` instead. (#3541) +- The `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. 
(#3575) +- The `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) +- The `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) +- The `go.opentelemetry.io/otel/metric/instrument/syncint64` package is deprecated. + Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575) +- The `NewWrappedTracerProvider` in `go.opentelemetry.io/otel/bridge/opentracing` is now deprecated. + Use `NewTracerProvider` instead. (#3116) + +### Removed + +- The deprecated `go.opentelemetry.io/otel/sdk/metric/view` package is removed. (#3520) +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncint64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Int64ObservableCounter` + - The `UpDownCounter` method is replaced by `Meter.Int64ObservableUpDownCounter` + - The `Gauge` method is replaced by `Meter.Int64ObservableGauge` +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncfloat64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Float64ObservableCounter` + - The `UpDownCounter` method is replaced by `Meter.Float64ObservableUpDownCounter` + - The `Gauge` method is replaced by `Meter.Float64ObservableGauge` +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncint64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Int64Counter` + - The `UpDownCounter` method is replaced by `Meter.Int64UpDownCounter` + - The `Histogram` method is replaced by `Meter.Int64Histogram` +- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncfloat64` is removed. + Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530) + - The `Counter` method is replaced by `Meter.Float64Counter` + - The `UpDownCounter` method is replaced by `Meter.Float64UpDownCounter` + - The `Histogram` method is replaced by `Meter.Float64Histogram` + +## [1.11.2/0.34.0] 2022-12-05 + +### Added + +- The `WithView` `Option` is added to the `go.opentelemetry.io/otel/sdk/metric` package. + This option is used to configure the view(s) a `MeterProvider` will use for all `Reader`s that are registered with it. (#3387) +- Add Instrumentation Scope and Version as info metric and label in Prometheus exporter. + This can be disabled using the `WithoutScopeInfo()` option added to that package.(#3273, #3357) +- OTLP exporters now recognize: (#3363) + - `OTEL_EXPORTER_OTLP_INSECURE` + - `OTEL_EXPORTER_OTLP_TRACES_INSECURE` + - `OTEL_EXPORTER_OTLP_METRICS_INSECURE` + - `OTEL_EXPORTER_OTLP_CLIENT_KEY` + - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY` + - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY` + - `OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE` + - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE` + - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE` +- The `View` type and related `NewView` function to create a view according to the OpenTelemetry specification are added to `go.opentelemetry.io/otel/sdk/metric`. 
+ These additions are replacements for the `View` type and `New` function from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459) +- The `Instrument` and `InstrumentKind` type are added to `go.opentelemetry.io/otel/sdk/metric`. + These additions are replacements for the `Instrument` and `InstrumentKind` types from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459) +- The `Stream` type is added to `go.opentelemetry.io/otel/sdk/metric` to define a metric data stream a view will produce. (#3459) +- The `AssertHasAttributes` allows instrument authors to test that datapoints returned have appropriate attributes. (#3487) + +### Changed + +- The `"go.opentelemetry.io/otel/sdk/metric".WithReader` option no longer accepts views to associate with the `Reader`. + Instead, views are now registered directly with the `MeterProvider` via the new `WithView` option. + The views registered with the `MeterProvider` apply to all `Reader`s. (#3387) +- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/sdk/metric".Exporter` interface. (#3260) +- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric".Client` interface. (#3260) +- The `WithTemporalitySelector` and `WithAggregationSelector` `ReaderOption`s have been changed to `ManualReaderOption`s in the `go.opentelemetry.io/otel/sdk/metric` package. (#3260) +- The periodic reader in the `go.opentelemetry.io/otel/sdk/metric` package now uses the temporality and aggregation selectors from its configured exporter instead of accepting them as options. (#3260) + +### Fixed + +- The `go.opentelemetry.io/otel/exporters/prometheus` exporter fixes duplicated `_total` suffixes. (#3369) +- Remove comparable requirement for `Reader`s. (#3387) +- Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389) +- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398) +- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340) +- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436) +- Reenabled Attribute Filters in the Metric SDK. (#3396) +- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggragation. (#3408) +- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432) +- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440) +- Prevent duplicate Prometheus description, unit, and type. (#3469) +- Prevents panic when using incorrect `attribute.Value.As[Type]Slice()`. (#3489) + +### Removed + +- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486) +- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use the `otlpmetric[http|grpc].New` directly. (#3486) + +### Deprecated + +- The `go.opentelemetry.io/otel/sdk/metric/view` package is deprecated. 
+ Use `Instrument`, `InstrumentKind`, `View`, and `NewView` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3476) + +## [1.11.1/0.33.0] 2022-10-19 + +### Added + +- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` registers with a Prometheus registerer on creation. + By default, it will register with the default Prometheus registerer. + A non-default registerer can be used by passing the `WithRegisterer` option. (#3239) +- Added the `WithAggregationSelector` option to the `go.opentelemetry.io/otel/exporters/prometheus` package to change the default `AggregationSelector` used. (#3341) +- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` converts the `Resource` associated with metric exports into a `target_info` metric. (#3285) + +### Changed + +- The `"go.opentelemetry.io/otel/exporters/prometheus".New` function is updated to return an error. + It will return an error if the exporter fails to register with Prometheus. (#3239) + +### Fixed + +- The URL-encoded values from the `OTEL_RESOURCE_ATTRIBUTES` environment variable are decoded. (#2963) +- The `baggage.NewMember` function decodes the `value` parameter instead of directly using it. + This fixes the implementation to be compliant with the W3C specification. (#3226) +- Slice attributes of the `attribute` package are now comparable based on their value, not instance. (#3108 #3252) +- The `Shutdown` and `ForceFlush` methods of the `"go.opentelemetry.io/otel/sdk/trace".TraceProvider` no longer return an error when no processor is registered. (#3268) +- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` cumulatively sums histogram buckets. (#3281) +- The sum of each histogram data point is now uniquely exported by the `go.opentelemetry.io/otel/exporters/otlpmetric` exporters. (#3284, #3293) +- Recorded values for asynchronous counters (`Counter` and `UpDownCounter`) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278) +- `UpDownCounters` are now correctly output as Prometheus gauges in the `go.opentelemetry.io/otel/exporters/prometheus` exporter. (#3358) +- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` no longer describes the metrics it will send to Prometheus on startup. + Instead the exporter is defined as an "unchecked" collector for Prometheus. + This fixes the `reader is not registered` warning currently emitted on startup. (#3291 #3342) +- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now correctly adds `_total` suffixes to counter metrics. (#3360) +- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now adds a unit suffix to metric names. + This can be disabled using the `WithoutUnits()` option added to that package. (#3352) + +## [1.11.0/0.32.3] 2022-10-12 + +### Added + +- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlptrace/otlptracehttp`). (#3261) + +### Changed + +- `span.SetStatus` has been updated such that calls that lower the status are now no-ops. (#3214) +- Upgrade `golang.org/x/sys/unix` from `v0.0.0-20210423185535-09eb48e85fd7` to `v0.0.0-20220919091848-fb04ddd9f9c8`. + This addresses [GO-2022-0493](https://pkg.go.dev/vuln/GO-2022-0493). (#3235) + +## [0.32.2] Metric SDK (Alpha) - 2022-10-11 + +### Added + +- Added an example of using metric views to customize instruments. 
(#3177) +- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlpmetric/otlpmetrichttp`). (#3261) + +### Changed + +- Flush pending measurements with the `PeriodicReader` in the `go.opentelemetry.io/otel/sdk/metric` when `ForceFlush` or `Shutdown` are called. (#3220) +- Update histogram default bounds to match the requirements of the latest specification. (#3222) +- Encode the HTTP status code in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`) as an integer. (#3265) + +### Fixed + +- Use default view if instrument does not match any registered view of a reader. (#3224, #3237) +- Return the same instrument every time a user makes the exact same instrument creation call. (#3229, #3251) +- Return the existing instrument when a view transforms a creation call to match an existing instrument. (#3240, #3251) +- Log a warning when a conflicting instrument (e.g. description, unit, data-type) is created instead of returning an error. (#3251) +- The OpenCensus bridge no longer sends empty batches of metrics. (#3263) + +## [0.32.1] Metric SDK (Alpha) - 2022-09-22 + +### Changed + +- The Prometheus exporter sanitizes OpenTelemetry instrument names when exporting. + Invalid characters are replaced with `_`. (#3212) + +### Added + +- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been reintroduced. (#3192) +- The OpenCensus bridge example (`go.opentelemetry.io/otel/example/opencensus`) has been reintroduced. (#3206) + +### Fixed + +- Updated go.mods to point to valid versions of the sdk. (#3216) +- Set the `MeterProvider` resource on all exported metric data. (#3218) + +## [0.32.0] Revised Metric SDK (Alpha) - 2022-09-18 + +### Changed + +- The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification. + Please see the package documentation for how the new SDK is initialized and configured. (#3175) +- Update the minimum supported go version to go1.18. Removes support for go1.17 (#3179) + +### Removed + +- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed. + A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. 
(#3175) +- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed. + A replacement package that supports the new metric SDK will be added back in a future release. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider` in the new metric SDK. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider` in the new metric SDK. (#3175) +- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175) + +## [1.10.0] - 2022-09-09 + +### Added + +- Support Go 1.19. (#3077) + Include compatibility testing and document support. (#3077) +- Support the OTLP ExportTracePartialSuccess response; these are passed to the registered error handler. (#3106) +- Upgrade go.opentelemetry.io/proto/otlp from v0.18.0 to v0.19.0. (#3107) + +### Changed + +- Fix misidentification of OpenTelemetry `SpanKind` in OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`). (#3096) +- Attempting to start a span with a nil `context` will no longer cause a panic. (#3110) +- All exporters will be shut down even if one reports an error. (#3091) +- Ensure valid UTF-8 when truncating over-length attribute values. (#3156) + +## [1.9.0/0.0.3] - 2022-08-01 + +### Added + +- Add support for Schema Files format 1.1.x (metric "split" transform) with the new `go.opentelemetry.io/otel/schema/v1.1` package. (#2999) +- Add the `go.opentelemetry.io/otel/semconv/v1.11.0` package. + The package contains semantic conventions from the `v1.11.0` version of the OpenTelemetry specification. (#3009) +- Add the `go.opentelemetry.io/otel/semconv/v1.12.0` package. + The package contains semantic conventions from the `v1.12.0` version of the OpenTelemetry specification. (#3010) +- Add the `http.method` attribute to HTTP server metric from all `go.opentelemetry.io/otel/semconv/*` packages. (#3018) + +### Fixed + +- Invalid warning for context setup being deferred in `go.opentelemetry.io/otel/bridge/opentracing` package. (#3029) + +## [1.8.0/0.31.0] - 2022-07-08 + +### Added + +- Add support for `opentracing.TextMap` format in the `Inject` and `Extract` methods +of the `"go.opentelemetry.io/otel/bridge/opentracing".BridgeTracer` type. (#2911) + +### Changed + +- The `crosslink` make target has been updated to use the `go.opentelemetry.io/build-tools/crosslink` package.
(#2886) +- In the `go.opentelemetry.io/otel/sdk/instrumentation` package rename `Library` to `Scope` and alias `Library` as `Scope`. (#2976) +- Move metric no-op implementation from `nonrecording` to `metric` package. (#2866) + +### Removed + +- Support for go1.16. Support is now only for go1.17 and go1.18. (#2917) + +### Deprecated + +- The `Library` struct in the `go.opentelemetry.io/otel/sdk/instrumentation` package is deprecated. + Use the equivalent `Scope` struct instead. (#2977) +- The `ReadOnlySpan.InstrumentationLibrary` method from the `go.opentelemetry.io/otel/sdk/trace` package is deprecated. + Use the equivalent `ReadOnlySpan.InstrumentationScope` method instead. (#2977) + +## [1.7.0/0.30.0] - 2022-04-28 + +### Added + +- Add the `go.opentelemetry.io/otel/semconv/v1.8.0` package. + The package contains semantic conventions from the `v1.8.0` version of the OpenTelemetry specification. (#2763) +- Add the `go.opentelemetry.io/otel/semconv/v1.9.0` package. + The package contains semantic conventions from the `v1.9.0` version of the OpenTelemetry specification. (#2792) +- Add the `go.opentelemetry.io/otel/semconv/v1.10.0` package. + The package contains semantic conventions from the `v1.10.0` version of the OpenTelemetry specification. (#2842) +- Added an in-memory exporter to metrictest to aid testing with a full SDK. (#2776) + +### Fixed + +- Globally delegated instruments are unwrapped before delegating asynchronous callbacks. (#2784) +- Remove import of `testing` package in non-test builds of the `go.opentelemetry.io/otel` package. (#2786) + +### Changed + +- The `WithLabelEncoder` option from the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` package is renamed to `WithAttributeEncoder`. (#2790) +- The `LabelFilterSelector` interface from `go.opentelemetry.io/otel/sdk/metric/processor/reducer` is renamed to `AttributeFilterSelector`. + The method included in the renamed interface also changed from `LabelFilterFor` to `AttributeFilterFor`. (#2790) +- The `Metadata.Labels` method from the `go.opentelemetry.io/otel/sdk/metric/export` package is renamed to `Metadata.Attributes`. + Consequently, the `Record` type from the same package also has had the embedded method renamed. (#2790) + +### Deprecated + +- The `Iterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated. + Use the equivalent `Iterator.Attribute` method instead. (#2790) +- The `Iterator.IndexedLabel` method in the `go.opentelemetry.io/otel/attribute` package is deprecated. + Use the equivalent `Iterator.IndexedAttribute` method instead. (#2790) +- The `MergeIterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated. + Use the equivalent `MergeIterator.Attribute` method instead. (#2790) + +### Removed + +- Removed the `Batch` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864) +- Removed the `Measurement` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864) + +## [0.29.0] - 2022-04-11 + +### Added + +- The metrics global package was added back into several test files. (#2764) +- The `Meter` function is added back to the `go.opentelemetry.io/otel/metric/global` package. + This function is a convenience function equivalent to calling `global.MeterProvider().Meter(...)`. (#2750) + +### Removed + +- Removed the `go.opentelemetry.io/otel/sdk/export/metric` module. + Use the `go.opentelemetry.io/otel/sdk/metric` module instead.
(#2720) + +### Changed + +- Don't panic anymore when setting a global MeterProvider to itself. (#2749) +- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` from `v0.12.1` to `v0.15.0`. + This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibraryMetrics` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeMetrics`. (#2748) + +## [1.6.3] - 2022-04-07 + +### Fixed + +- Allow non-comparable global `MeterProvider`, `TracerProvider`, and `TextMapPropagator` types to be set. (#2772, #2773) + +## [1.6.2] - 2022-04-06 + +### Changed + +- Don't panic anymore when setting a global TracerProvider or TextMapPropagator to itself. (#2749) +- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from `v0.12.1` to `v0.15.0`. + This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibrarySpans` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeSpans`. (#2748) + +## [1.6.1] - 2022-03-28 + +### Fixed + +- The `go.opentelemetry.io/otel/schema/*` packages now use the correct schema URL for their `SchemaURL` constant. + Instead of using `"https://opentelemetry.io/schemas/v"` they now use the correct URL without a `v` prefix, `"https://opentelemetry.io/schemas/"`. (#2743, #2744) + +### Security + +- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.12.0` to `v0.12.1`. + This includes an indirect upgrade of `github.com/grpc-ecosystem/grpc-gateway` which resolves [a vulnerability](https://nvd.nist.gov/vuln/detail/CVE-2019-11254) from `gopkg.in/yaml.v2` in version `v2.2.3`. (#2724, #2728) + +## [1.6.0/0.28.0] - 2022-03-23 + +### ⚠️ Notice ⚠️ + +This update is a breaking change of the unstable Metrics API. +Code instrumented with the `go.opentelemetry.io/otel/metric` package will need to be modified. + +### Added + +- Add metrics exponential histogram support. + New mapping functions have been made available in `sdk/metric/aggregator/exponential/mapping` for other OpenTelemetry projects to take dependencies on. (#2502) +- Add Go 1.18 to our compatibility tests. (#2679) +- Allow configuring the Sampler with the `OTEL_TRACES_SAMPLER` and `OTEL_TRACES_SAMPLER_ARG` environment variables. (#2305, #2517) +- Add the `metric/global` package for obtaining and setting the global `MeterProvider`. (#2660) + +### Changed + +- The metrics API has been significantly changed to match the revised OpenTelemetry specification. + High-level changes include: + + - Synchronous and asynchronous instruments are now handled by independent `InstrumentProvider`s. + These `InstrumentProvider`s are managed with a `Meter`. + - Synchronous and asynchronous instruments are grouped into their own packages based on value types. + - Asynchronous callbacks can now be registered with a `Meter`. + + Be sure to check out the metric module documentation for more information on how to use the revised API. (#2587, #2660) + +### Fixed + +- Fall back to general attribute limits when span-specific ones are not set in the environment. (#2675, #2677) + +## [1.5.0] - 2022-03-16 + +### Added + +- Log the Exporters configuration in the TracerProviders message. (#2578) +- Added support to configure the span limits with environment variables. + The following environment variables are supported.
(#2606, #2637) + - `OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT` + - `OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT` + - `OTEL_SPAN_EVENT_COUNT_LIMIT` + - `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT` + - `OTEL_SPAN_LINK_COUNT_LIMIT` + - `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT` + + If the provided environment variables are invalid (negative), the default values will be used. +- Rename the `gc` runtime name to `go`. (#2560) +- Add resource container ID detection. (#2418) +- Add span attribute value length limit. + The new `AttributeValueLengthLimit` field is added to the `"go.opentelemetry.io/otel/sdk/trace".SpanLimits` type to configure this limit for a `TracerProvider`. + The default limit for this resource is "unlimited". (#2637) +- Add the `WithRawSpanLimits` option to `go.opentelemetry.io/otel/sdk/trace`. + This option replaces the `WithSpanLimits` option. + Zero or negative values will not be changed to the default value like `WithSpanLimits` does. + Setting a limit to zero will effectively disable the related resource it limits and setting to a negative value will mean that resource is unlimited. + Consequently, limits should be constructed using `NewSpanLimits` and updated accordingly (see the sketch after these changelog entries). (#2637) + +### Changed + +- Drop oldest tracestate `Member` when capacity is reached. (#2592) +- Add event and link drop counts to the exported data from the `otlptrace` exporter. (#2601) +- Unify path cleaning functionality in the `otlpmetric` and `otlptrace` configuration. (#2639) +- Change the debug message from the `sdk/trace.BatchSpanProcessor` to reflect that the count is cumulative. (#2640) +- Introduce new internal `envconfig` package for OTLP exporters. (#2608) +- If `http.Request.Host` is empty, fall back to use `URL.Host` when populating `http.host` in the `semconv` packages. (#2661) + +### Fixed + +- Remove the OTLP trace exporter limit of SpanEvents when exporting. (#2616) +- Default to port `4318` instead of `4317` for the `otlpmetrichttp` and `otlptracehttp` client. (#2614, #2625) +- Unlimited span limits are now supported (negative values). (#2636, #2637) + +### Deprecated + +- Deprecated `"go.opentelemetry.io/otel/sdk/trace".WithSpanLimits`. + Use `WithRawSpanLimits` instead. + That option allows setting unlimited and zero limits; this option does not. + This option will be kept until the next release that increments the major version. (#2637) + ## [1.4.1] - 2022-02-16 ### Fixed @@ -1689,7 +2220,24 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project.
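The `NewSpanLimits` constructor and the `WithRawSpanLimits` option described in the [1.5.0] entries above are meant to be combined: build the limits (which also pick up the `OTEL_SPAN_*`/`OTEL_LINK_*` environment variables listed there), adjust individual fields, and hand the result to the tracer provider. The following is a minimal sketch, assuming the `go.opentelemetry.io/otel/sdk/trace` API from v1.5.0 onward; the specific limit values are illustrative only.

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// NewSpanLimits starts from the defaults, which also honor the
	// OTEL_SPAN_* environment variables described in the [1.5.0] entry.
	limits := sdktrace.NewSpanLimits()
	limits.AttributeValueLengthLimit = 256 // truncate long attribute values
	limits.LinkCountLimit = -1             // a negative value means unlimited

	// WithRawSpanLimits applies the struct as-is; zero and negative values
	// are not replaced with defaults the way the deprecated WithSpanLimits was.
	tp := sdktrace.NewTracerProvider(sdktrace.WithRawSpanLimits(limits))
	defer func() { _ = tp.Shutdown(context.Background()) }()

	otel.SetTracerProvider(tp)
}
```

Because `WithRawSpanLimits` applies the struct verbatim, this is also the way to express "unlimited" (negative) or "disabled" (zero) limits that `WithSpanLimits` would have silently reset to defaults.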
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.4.1...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.12.0...HEAD +[1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0 +[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2 +[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1 +[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0 +[0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2 +[0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1 +[0.32.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.0 +[1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0 +[1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0 +[1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0 +[1.7.0/0.30.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.7.0 +[0.29.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.29.0 +[1.6.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.3 +[1.6.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.2 +[1.6.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.1 +[1.6.0/0.28.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.0 +[1.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.5.0 [1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1 [1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0 [1.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0 diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS index 808755fe20cff..c4012ed6ca1c6 100644 --- a/vendor/go.opentelemetry.io/otel/CODEOWNERS +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -12,6 +12,6 @@ # https://help.github.com/en/articles/about-code-owners # -* @jmacd @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @paivagustavo @MadVikingGod @pellared +* @jmacd @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu CODEOWNERS @MrAlias @Aneurysm9 @MadVikingGod diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 7dead7084d3e2..9371a481ab127 100644 --- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -506,8 +506,9 @@ Approvers: - [Josh MacDonald](https://github.com/jmacd), LightStep - [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics - [David Ashpole](https://github.com/dashpole), Google -- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep - [Robert Pająk](https://github.com/pellared), Splunk +- [Chester Cheung](https://github.com/hanyuancheung), Tencent +- [Damien Mathieu](https://github.com/dmathieu), Auth0/Okta Maintainers: @@ -515,6 +516,10 @@ Maintainers: - [Anthony Mirabella](https://github.com/Aneurysm9), AWS - [Tyler Yahn](https://github.com/MrAlias), Splunk +Emeritus: + +- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep + ### Become an Approver or a Maintainer See the [community membership document in OpenTelemetry community diff --git 
a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile index b085561dbaaf4..befb040a77b39 100644 --- a/vendor/go.opentelemetry.io/otel/Makefile +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -17,7 +17,7 @@ TOOLS_MOD_DIR := ./internal/tools ALL_DOCS := $(shell find . -name '*.md' -type f | sort) ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort) OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS)) -ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example|^$(TOOLS_MOD_DIR)' | sort) +ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | grep -E -v '^./example|^$(TOOLS_MOD_DIR)' | sort) GO = go TIMEOUT = 60 @@ -25,7 +25,7 @@ TIMEOUT = 60 .DEFAULT_GOAL := precommit .PHONY: precommit ci -precommit: license-check misspell go-mod-tidy golangci-lint-fix test-default +precommit: dependabot-generate license-check vanity-import-fix misspell go-mod-tidy golangci-lint-fix test-default ci: dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage # Tools @@ -45,7 +45,13 @@ SEMCONVGEN = $(TOOLS)/semconvgen $(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen CROSSLINK = $(TOOLS)/crosslink -$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/crosslink +$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink + +SEMCONVKIT = $(TOOLS)/semconvkit +$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit + +DBOTCONF = $(TOOLS)/dbotconf +$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf GOLANGCI_LINT = $(TOOLS)/golangci-lint $(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint @@ -66,7 +72,7 @@ GOJQ = $(TOOLS)/gojq $(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq .PHONY: tools -tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) +tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) # Build @@ -123,6 +129,7 @@ test-coverage: | $(GOCOVMERGE) (cd "$${dir}" && \ $(GO) list ./... \ | grep -v third_party \ + | grep -v 'semconv/v.*' \ | xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \ $(GO) tool cover -html=coverage.out -o coverage.html); \ done; \ @@ -140,8 +147,8 @@ golangci-lint/%: | $(GOLANGCI_LINT) .PHONY: crosslink crosslink: | $(CROSSLINK) - @echo "cross-linking all go modules" \ - && $(CROSSLINK) + @echo "Updating intra-repository dependencies in all go modules" \ + && $(CROSSLINK) --root=$(shell pwd) --prune .PHONY: go-mod-tidy go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%) @@ -149,7 +156,7 @@ go-mod-tidy/%: DIR=$* go-mod-tidy/%: | crosslink @echo "$(GO) mod tidy in $(DIR)" \ && cd $(DIR) \ - && $(GO) mod tidy + && $(GO) mod tidy -compat=1.18 .PHONY: lint-modules lint-modules: go-mod-tidy @@ -159,7 +166,11 @@ lint: misspell lint-modules golangci-lint .PHONY: vanity-import-check vanity-import-check: | $(PORTO) - @$(PORTO) --include-internal -l . + @$(PORTO) --include-internal -l . || echo "(run: make vanity-import-fix)" + +.PHONY: vanity-import-fix +vanity-import-fix: | $(PORTO) + @$(PORTO) --include-internal -w . 
.PHONY: misspell misspell: | $(MISSPELL) @@ -167,7 +178,7 @@ misspell: | $(MISSPELL) .PHONY: license-check license-check: - @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*') ; do \ + @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \ awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=3 { found=1; next } END { if (!found) print FILENAME }' $$f; \ done); \ if [ -n "$${licRes}" ]; then \ @@ -175,20 +186,14 @@ license-check: exit 1; \ fi -DEPENDABOT_PATH=./.github/dependabot.yml +DEPENDABOT_CONFIG = .github/dependabot.yml .PHONY: dependabot-check -dependabot-check: - @result=$$( \ - for f in $$( find . -type f -name go.mod -exec dirname {} \; | sed 's/^.//' ); \ - do grep -q "directory: \+$$f" $(DEPENDABOT_PATH) \ - || echo "$$f"; \ - done; \ - ); \ - if [ -n "$$result" ]; then \ - echo "missing dependabot entry:"; echo "$$result"; \ - echo "new modules need to be added to the $(DEPENDABOT_PATH) file"; \ - exit 1; \ - fi +dependabot-check: | $(DBOTCONF) + @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || echo "(run: make dependabot-generate)" + +.PHONY: dependabot-generate +dependabot-generate: | $(DBOTCONF) + @$(DBOTCONF) generate > $(DEPENDABOT_CONFIG) .PHONY: check-clean-work-tree check-clean-work-tree: @@ -200,6 +205,15 @@ check-clean-work-tree: exit 1; \ fi +SEMCONVPKG ?= "semconv/" +.PHONY: semconv-generate +semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT) + [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry specification tag"; exit 1 ) + [ "$(OTEL_SPEC_REPO)" ] || ( echo "OTEL_SPEC_REPO unset: missing path to opentelemetry specification repo"; exit 1 ) + $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=span -p conventionType=trace -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=resource -p conventionType=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" + $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" + .PHONY: prerelease prerelease: | $(MULTIMOD) @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md index 21c2a71612e4d..1b2ee21fbf534 100644 --- a/vendor/go.opentelemetry.io/otel/README.md +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -30,27 +30,36 @@ Project versioning information and stability guarantees can be found in the ### Compatibility -OpenTelemetry-Go attempts to track the current supported versions of the -[Go language](https://golang.org/doc/devel/release#policy). The release -schedule after a new minor version of go is as follows: +OpenTelemetry-Go ensures compatibility with the current supported versions of +the [Go language](https://golang.org/doc/devel/release#policy): -- The first release or one month, which ever is sooner, will add build steps for the new go version. -- The first release after three months will remove support for the oldest go version. +> Each major Go release is supported until there are two newer major releases. +> For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release. -This project is tested on the following systems. 
+For versions of Go that are no longer supported upstream, opentelemetry-go will +stop ensuring compatibility with these versions in the following manner: + +- A minor release of opentelemetry-go will be made to add support for the new + supported release of Go. +- The following minor release of opentelemetry-go will remove compatibility + testing for the oldest (now archived upstream) version of Go. This, and + future, releases of opentelemetry-go may include features only supported by + the currently supported versions of Go. + +Currently, this project supports the following environments. | OS | Go Version | Architecture | | ------- | ---------- | ------------ | -| Ubuntu | 1.17 | amd64 | -| Ubuntu | 1.16 | amd64 | -| Ubuntu | 1.17 | 386 | -| Ubuntu | 1.16 | 386 | -| MacOS | 1.17 | amd64 | -| MacOS | 1.16 | amd64 | -| Windows | 1.17 | amd64 | -| Windows | 1.16 | amd64 | -| Windows | 1.17 | 386 | -| Windows | 1.16 | 386 | +| Ubuntu | 1.19 | amd64 | +| Ubuntu | 1.18 | amd64 | +| Ubuntu | 1.19 | 386 | +| Ubuntu | 1.18 | 386 | +| MacOS | 1.19 | amd64 | +| MacOS | 1.18 | amd64 | +| Windows | 1.19 | amd64 | +| Windows | 1.18 | amd64 | +| Windows | 1.19 | 386 | +| Windows | 1.18 | 386 | While this project should work for other systems, no compatibility guarantees are made for those systems currently. @@ -76,7 +85,7 @@ libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/ If you need to extend the telemetry an instrumentation library provides or want to build your own instrumentation for your application directly you will need to use the -[go.opentelemetry.io/otel/api](https://pkg.go.dev/go.opentelemetry.io/otel/api) +[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel) package. The included [examples](./example/) are a good way to see some practical uses of this process. @@ -95,8 +104,6 @@ All officially supported exporters for the OpenTelemetry project are contained i | [stdout](./exporters/stdout/) | ✓ | ✓ | | [Zipkin](./exporters/zipkin/) | | ✓ | -Additionally, OpenTelemetry community supported exporters can be found in the [contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/exporters). - ## Contributing See the [contributing documentation](CONTRIBUTING.md). diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md index e3bff66c6a957..77d56c936515a 100644 --- a/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -2,35 +2,28 @@ ## Semantic Convention Generation -If a new version of the OpenTelemetry Specification has been released it will be necessary to generate a new -semantic convention package from the YAML definitions in the specification repository. There is a `semconvgen` utility -installed by `make tools` that can be used to generate the a package with the name matching the specification -version number under the `semconv` package. This will ideally be done soon after the specification release is -tagged. Make sure that the specification repo contains a checkout of the the latest tagged release so that the -generated files match the released semantic conventions. +New versions of the [OpenTelemetry specification] mean new versions of the `semconv` package need to be generated. +The `semconv-generate` make target is used for this. -There are currently two categories of semantic conventions that must be generated, `resource` and `trace`. +1. Checkout a local copy of the [OpenTelemetry specification] to the desired release tag. 
+2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest` +3. Run the `make semconv-generate ...` target from this repository. -``` -.tools/semconvgen -i /path/to/specification/repo/semantic_conventions/resource -t semconv/template.j2 -.tools/semconvgen -i /path/to/specification/repo/semantic_conventions/trace -t semconv/template.j2 -``` - -Using default values for all options other than `input` will result in using the `template.j2` template to -generate `resource.go` and `trace.go` in `/path/to/otelgo/repo/semconv/`. +For example, -There are several ancillary files that are not generated and should be copied into the new package from the -prior package, with updates made as appropriate to canonical import path statements and constant values. -These files include: +```sh +export TAG="v1.13.0" # Change to the release version you are generating. +export OTEL_SPEC_REPO="/absolute/path/to/opentelemetry-specification" +git -C "$OTEL_SPEC_REPO" checkout "tags/$TAG" -b "$TAG" +docker pull otel/semconvgen:latest +make semconv-generate # Uses the exported TAG and OTEL_SPEC_REPO. +``` -* doc.go -* exception.go -* http(_test)?.go -* schema.go +This should create a new sub-package of [`semconv`](./semconv). +Ensure things look correct before submitting a pull request to include the addition. -Uses of the previous schema version in this repository should be updated to use the newly generated version. -No tooling for this exists at present, so use find/replace in your editor of choice or craft a `grep | sed` -pipeline if you like living on the edge. +**Note**, the generation code was changed to generate versions >= 1.13. +To generate versions prior to this, checkout the old release of this repository (i.e. [2fe8861](https://github.com/open-telemetry/opentelemetry-go/commit/2fe8861a24e20088c065b116089862caf9e3cd8b)). ## Pre-Release @@ -130,3 +123,5 @@ Once verified be sure to [make a release for the `contrib` repository](https://g Update [the documentation](./website_docs) for [the OpenTelemetry website](https://opentelemetry.io/docs/go/). Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate. + +[OpenTelemetry specification]: https://github.com/open-telemetry/opentelemetry-specification diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go index 8b940f78dc4a7..fe2bc5766cff9 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go +++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go @@ -21,19 +21,17 @@ import ( ) type ( - // Encoder is a mechanism for serializing a label set into a - // specific string representation that supports caching, to - // avoid repeated serialization. An example could be an - // exporter encoding the label set into a wire representation. + // Encoder is a mechanism for serializing an attribute set into a specific + // string representation that supports caching, to avoid repeated + // serialization. An example could be an exporter encoding the attribute + // set into a wire representation. Encoder interface { - // Encode returns the serialized encoding of the label - // set using its Iterator. This result may be cached - // by a attribute.Set. + // Encode returns the serialized encoding of the attribute set using + // its Iterator. This result may be cached by a attribute.Set. 
Encode(iterator Iterator) string - // ID returns a value that is unique for each class of - // label encoder. Label encoders allocate these using - // `NewEncoderID`. + // ID returns a value that is unique for each class of attribute + // encoder. Attribute encoders allocate these using `NewEncoderID`. ID() EncoderID } @@ -43,54 +41,53 @@ type ( value uint64 } - // defaultLabelEncoder uses a sync.Pool of buffers to reduce - // the number of allocations used in encoding labels. This - // implementation encodes a comma-separated list of key=value, - // with '/'-escaping of '=', ',', and '\'. - defaultLabelEncoder struct { - // pool is a pool of labelset builders. The buffers in this - // pool grow to a size that most label encodings will not - // allocate new memory. + // defaultAttrEncoder uses a sync.Pool of buffers to reduce the number of + // allocations used in encoding attributes. This implementation encodes a + // comma-separated list of key=value, with '/'-escaping of '=', ',', and + // '\'. + defaultAttrEncoder struct { + // pool is a pool of attribute set builders. The buffers in this pool + // grow to a size that most attribute encodings will not allocate new + // memory. pool sync.Pool // *bytes.Buffer } ) -// escapeChar is used to ensure uniqueness of the label encoding where -// keys or values contain either '=' or ','. Since there is no parser -// needed for this encoding and its only requirement is to be unique, -// this choice is arbitrary. Users will see these in some exporters -// (e.g., stdout), so the backslash ('\') is used as a conventional choice. +// escapeChar is used to ensure uniqueness of the attribute encoding where +// keys or values contain either '=' or ','. Since there is no parser needed +// for this encoding and its only requirement is to be unique, this choice is +// arbitrary. Users will see these in some exporters (e.g., stdout), so the +// backslash ('\') is used as a conventional choice. const escapeChar = '\\' var ( - _ Encoder = &defaultLabelEncoder{} + _ Encoder = &defaultAttrEncoder{} - // encoderIDCounter is for generating IDs for other label - // encoders. + // encoderIDCounter is for generating IDs for other attribute encoders. encoderIDCounter uint64 defaultEncoderOnce sync.Once defaultEncoderID = NewEncoderID() - defaultEncoderInstance *defaultLabelEncoder + defaultEncoderInstance *defaultAttrEncoder ) -// NewEncoderID returns a unique label encoder ID. It should be -// called once per each type of label encoder. Preferably in init() or -// in var definition. +// NewEncoderID returns a unique attribute encoder ID. It should be called +// once per each type of attribute encoder. Preferably in init() or in var +// definition. func NewEncoderID() EncoderID { return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)} } -// DefaultEncoder returns a label encoder that encodes labels -// in such a way that each escaped label's key is followed by an equal -// sign and then by an escaped label's value. All key-value pairs are -// separated by a comma. +// DefaultEncoder returns an attribute encoder that encodes attributes in such +// a way that each escaped attribute's key is followed by an equal sign and +// then by an escaped attribute's value. All key-value pairs are separated by +// a comma. // -// Escaping is done by prepending a backslash before either a -// backslash, equal sign or a comma. +// Escaping is done by prepending a backslash before either a backslash, equal +// sign or a comma. 
func DefaultEncoder() Encoder { defaultEncoderOnce.Do(func() { - defaultEncoderInstance = &defaultLabelEncoder{ + defaultEncoderInstance = &defaultAttrEncoder{ pool: sync.Pool{ New: func() interface{} { return &bytes.Buffer{} @@ -101,15 +98,14 @@ func DefaultEncoder() Encoder { return defaultEncoderInstance } -// Encode is a part of an implementation of the LabelEncoder -// interface. -func (d *defaultLabelEncoder) Encode(iter Iterator) string { +// Encode is a part of an implementation of the AttributeEncoder interface. +func (d *defaultAttrEncoder) Encode(iter Iterator) string { buf := d.pool.Get().(*bytes.Buffer) defer d.pool.Put(buf) buf.Reset() for iter.Next() { - i, keyValue := iter.IndexedLabel() + i, keyValue := iter.IndexedAttribute() if i > 0 { _, _ = buf.WriteRune(',') } @@ -126,8 +122,8 @@ func (d *defaultLabelEncoder) Encode(iter Iterator) string { return buf.String() } -// ID is a part of an implementation of the LabelEncoder interface. -func (*defaultLabelEncoder) ID() EncoderID { +// ID is a part of an implementation of the AttributeEncoder interface. +func (*defaultAttrEncoder) ID() EncoderID { return defaultEncoderID } @@ -137,9 +133,9 @@ func copyAndEscape(buf *bytes.Buffer, val string) { for _, ch := range val { switch ch { case '=', ',', escapeChar: - buf.WriteRune(escapeChar) + _, _ = buf.WriteRune(escapeChar) } - buf.WriteRune(ch) + _, _ = buf.WriteRune(ch) } } diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go index e03aabb62bd33..841b271fb7dd7 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/iterator.go +++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go @@ -14,16 +14,16 @@ package attribute // import "go.opentelemetry.io/otel/attribute" -// Iterator allows iterating over the set of labels in order, -// sorted by key. +// Iterator allows iterating over the set of attributes in order, sorted by +// key. type Iterator struct { storage *Set idx int } -// MergeIterator supports iterating over two sets of labels while -// eliminating duplicate values from the combined set. The first -// iterator value takes precedence. +// MergeIterator supports iterating over two sets of attributes while +// eliminating duplicate values from the combined set. The first iterator +// value takes precedence. type MergeIterator struct { one oneIterator two oneIterator @@ -31,13 +31,13 @@ type MergeIterator struct { } type oneIterator struct { - iter Iterator - done bool - label KeyValue + iter Iterator + done bool + attr KeyValue } -// Next moves the iterator to the next position. Returns false if there -// are no more labels. +// Next moves the iterator to the next position. Returns false if there are no +// more attributes. func (i *Iterator) Next() bool { i.idx++ return i.idx < i.Len() @@ -45,30 +45,41 @@ func (i *Iterator) Next() bool { // Label returns current KeyValue. Must be called only after Next returns // true. +// +// Deprecated: Use Attribute instead. func (i *Iterator) Label() KeyValue { - kv, _ := i.storage.Get(i.idx) - return kv + return i.Attribute() } -// Attribute is a synonym for Label(). +// Attribute returns the current KeyValue of the Iterator. It must be called +// only after Next returns true. func (i *Iterator) Attribute() KeyValue { - return i.Label() + kv, _ := i.storage.Get(i.idx) + return kv } // IndexedLabel returns current index and attribute. Must be called only // after Next returns true. +// +// Deprecated: Use IndexedAttribute instead. 
func (i *Iterator) IndexedLabel() (int, KeyValue) { - return i.idx, i.Label() + return i.idx, i.Attribute() } -// Len returns a number of labels in the iterator's `*Set`. +// IndexedAttribute returns current index and attribute. Must be called only +// after Next returns true. +func (i *Iterator) IndexedAttribute() (int, KeyValue) { + return i.idx, i.Attribute() +} + +// Len returns a number of attributes in the iterated set. func (i *Iterator) Len() int { return i.storage.Len() } -// ToSlice is a convenience function that creates a slice of labels -// from the passed iterator. The iterator is set up to start from the -// beginning before creating the slice. +// ToSlice is a convenience function that creates a slice of attributes from +// the passed iterator. The iterator is set up to start from the beginning +// before creating the slice. func (i *Iterator) ToSlice() []KeyValue { l := i.Len() if l == 0 { @@ -77,12 +88,12 @@ func (i *Iterator) ToSlice() []KeyValue { i.idx = -1 slice := make([]KeyValue, 0, l) for i.Next() { - slice = append(slice, i.Label()) + slice = append(slice, i.Attribute()) } return slice } -// NewMergeIterator returns a MergeIterator for merging two label sets +// NewMergeIterator returns a MergeIterator for merging two attribute sets. // Duplicates are resolved by taking the value from the first set. func NewMergeIterator(s1, s2 *Set) MergeIterator { mi := MergeIterator{ @@ -102,42 +113,49 @@ func makeOne(iter Iterator) oneIterator { func (oi *oneIterator) advance() { if oi.done = !oi.iter.Next(); !oi.done { - oi.label = oi.iter.Label() + oi.attr = oi.iter.Attribute() } } -// Next returns true if there is another label available. +// Next returns true if there is another attribute available. func (m *MergeIterator) Next() bool { if m.one.done && m.two.done { return false } if m.one.done { - m.current = m.two.label + m.current = m.two.attr m.two.advance() return true } if m.two.done { - m.current = m.one.label + m.current = m.one.attr m.one.advance() return true } - if m.one.label.Key == m.two.label.Key { - m.current = m.one.label // first iterator label value wins + if m.one.attr.Key == m.two.attr.Key { + m.current = m.one.attr // first iterator attribute value wins m.one.advance() m.two.advance() return true } - if m.one.label.Key < m.two.label.Key { - m.current = m.one.label + if m.one.attr.Key < m.two.attr.Key { + m.current = m.one.attr m.one.advance() return true } - m.current = m.two.label + m.current = m.two.attr m.two.advance() return true } // Label returns the current value after Next() returns true. +// +// Deprecated: Use Attribute instead. func (m *MergeIterator) Label() KeyValue { return m.current } + +// Attribute returns the current value after Next() returns true. +func (m *MergeIterator) Attribute() KeyValue { + return m.current +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go index a28f1435cb9c8..26be5983223bf 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/set.go +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -21,49 +21,42 @@ import ( ) type ( - // Set is the representation for a distinct label set. It - // manages an immutable set of labels, with an internal cache - // for storing label encodings. + // Set is the representation for a distinct attribute set. It manages an + // immutable set of attributes, with an internal cache for storing + // attribute encodings. 
// - // This type supports the `Equivalent` method of comparison - // using values of type `Distinct`. - // - // This type is used to implement: - // 1. Metric labels - // 2. Resource sets - // 3. Correlation map (TODO) + // This type supports the Equivalent method of comparison using values of + // type Distinct. Set struct { equivalent Distinct } - // Distinct wraps a variable-size array of `KeyValue`, - // constructed with keys in sorted order. This can be used as - // a map key or for equality checking between Sets. + // Distinct wraps a variable-size array of KeyValue, constructed with keys + // in sorted order. This can be used as a map key or for equality checking + // between Sets. Distinct struct { iface interface{} } - // Filter supports removing certain labels from label sets. - // When the filter returns true, the label will be kept in - // the filtered label set. When the filter returns false, the - // label is excluded from the filtered label set, and the - // label instead appears in the `removed` list of excluded labels. + // Filter supports removing certain attributes from attribute sets. When + // the filter returns true, the attribute will be kept in the filtered + // attribute set. When the filter returns false, the attribute is excluded + // from the filtered attribute set, and the attribute instead appears in + // the removed list of excluded attributes. Filter func(KeyValue) bool - // Sortable implements `sort.Interface`, used for sorting - // `KeyValue`. This is an exported type to support a - // memory optimization. A pointer to one of these is needed - // for the call to `sort.Stable()`, which the caller may - // provide in order to avoid an allocation. See - // `NewSetWithSortable()`. + // Sortable implements sort.Interface, used for sorting KeyValue. This is + // an exported type to support a memory optimization. A pointer to one of + // these is needed for the call to sort.Stable(), which the caller may + // provide in order to avoid an allocation. See NewSetWithSortable(). Sortable []KeyValue ) var ( - // keyValueType is used in `computeDistinctReflect`. + // keyValueType is used in computeDistinctReflect. keyValueType = reflect.TypeOf(KeyValue{}) - // emptySet is returned for empty label sets. + // emptySet is returned for empty attribute sets. emptySet = &Set{ equivalent: Distinct{ iface: [0]KeyValue{}, @@ -78,30 +71,30 @@ func EmptySet() *Set { return emptySet } -// reflect abbreviates `reflect.ValueOf`. -func (d Distinct) reflect() reflect.Value { +// reflectValue abbreviates reflect.ValueOf(d). +func (d Distinct) reflectValue() reflect.Value { return reflect.ValueOf(d.iface) } -// Valid returns true if this value refers to a valid `*Set`. +// Valid returns true if this value refers to a valid Set. func (d Distinct) Valid() bool { return d.iface != nil } -// Len returns the number of labels in this set. +// Len returns the number of attributes in this set. func (l *Set) Len() int { if l == nil || !l.equivalent.Valid() { return 0 } - return l.equivalent.reflect().Len() + return l.equivalent.reflectValue().Len() } -// Get returns the KeyValue at ordered position `idx` in this set. +// Get returns the KeyValue at ordered position idx in this set. 
func (l *Set) Get(idx int) (KeyValue, bool) { if l == nil { return KeyValue{}, false } - value := l.equivalent.reflect() + value := l.equivalent.reflectValue() if idx >= 0 && idx < value.Len() { // Note: The Go compiler successfully avoids an allocation for @@ -117,7 +110,7 @@ func (l *Set) Value(k Key) (Value, bool) { if l == nil { return Value{}, false } - rValue := l.equivalent.reflect() + rValue := l.equivalent.reflectValue() vlen := rValue.Len() idx := sort.Search(vlen, func(idx int) bool { @@ -142,7 +135,7 @@ func (l *Set) HasValue(k Key) bool { return ok } -// Iter returns an iterator for visiting the labels in this set. +// Iter returns an iterator for visiting the attributes in this set. func (l *Set) Iter() Iterator { return Iterator{ storage: l, @@ -150,18 +143,17 @@ func (l *Set) Iter() Iterator { } } -// ToSlice returns the set of labels belonging to this set, sorted, -// where keys appear no more than once. +// ToSlice returns the set of attributes belonging to this set, sorted, where +// keys appear no more than once. func (l *Set) ToSlice() []KeyValue { iter := l.Iter() return iter.ToSlice() } -// Equivalent returns a value that may be used as a map key. The -// Distinct type guarantees that the result will equal the equivalent -// Distinct value of any label set with the same elements as this, -// where sets are made unique by choosing the last value in the input -// for any given key. +// Equivalent returns a value that may be used as a map key. The Distinct type +// guarantees that the result will equal the equivalent. Distinct value of any +// attribute set with the same elements as this, where sets are made unique by +// choosing the last value in the input for any given key. func (l *Set) Equivalent() Distinct { if l == nil || !l.equivalent.Valid() { return emptySet.equivalent @@ -174,8 +166,7 @@ func (l *Set) Equals(o *Set) bool { return l.Equivalent() == o.Equivalent() } -// Encoded returns the encoded form of this set, according to -// `encoder`. +// Encoded returns the encoded form of this set, according to encoder. func (l *Set) Encoded(encoder Encoder) string { if l == nil || encoder == nil { return "" @@ -190,11 +181,11 @@ func empty() Set { } } -// NewSet returns a new `Set`. See the documentation for -// `NewSetWithSortableFiltered` for more details. +// NewSet returns a new Set. See the documentation for +// NewSetWithSortableFiltered for more details. // -// Except for empty sets, this method adds an additional allocation -// compared with calls that include a `*Sortable`. +// Except for empty sets, this method adds an additional allocation compared +// with calls that include a Sortable. func NewSet(kvs ...KeyValue) Set { // Check for empty set. if len(kvs) == 0 { @@ -204,10 +195,10 @@ func NewSet(kvs ...KeyValue) Set { return s } -// NewSetWithSortable returns a new `Set`. See the documentation for -// `NewSetWithSortableFiltered` for more details. +// NewSetWithSortable returns a new Set. See the documentation for +// NewSetWithSortableFiltered for more details. // -// This call includes a `*Sortable` option as a memory optimization. +// This call includes a Sortable option as a memory optimization. func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set { // Check for empty set. if len(kvs) == 0 { @@ -217,12 +208,11 @@ func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set { return s } -// NewSetWithFiltered returns a new `Set`. See the documentation for -// `NewSetWithSortableFiltered` for more details. +// NewSetWithFiltered returns a new Set. 
See the documentation for +// NewSetWithSortableFiltered for more details. // -// This call includes a `Filter` to include/exclude label keys from -// the return value. Excluded keys are returned as a slice of label -// values. +// This call includes a Filter to include/exclude attribute keys from the +// return value. Excluded keys are returned as a slice of attribute values. func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { // Check for empty set. if len(kvs) == 0 { @@ -231,7 +221,7 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { return NewSetWithSortableFiltered(kvs, new(Sortable), filter) } -// NewSetWithSortableFiltered returns a new `Set`. +// NewSetWithSortableFiltered returns a new Set. // // Duplicate keys are eliminated by taking the last value. This // re-orders the input slice so that unique last-values are contiguous @@ -243,17 +233,16 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { // - Caller sees the reordering, but doesn't lose values // - Repeated call preserve last-value wins. // -// Note that methods are defined on `*Set`, although this returns `Set`. -// Callers can avoid memory allocations by: +// Note that methods are defined on Set, although this returns Set. Callers +// can avoid memory allocations by: // -// - allocating a `Sortable` for use as a temporary in this method -// - allocating a `Set` for storing the return value of this -// constructor. +// - allocating a Sortable for use as a temporary in this method +// - allocating a Set for storing the return value of this constructor. // -// The result maintains a cache of encoded labels, by attribute.EncoderID. +// The result maintains a cache of encoded attributes, by attribute.EncoderID. // This value should not be copied after its first use. // -// The second `[]KeyValue` return value is a list of labels that were +// The second []KeyValue return value is a list of attributes that were // excluded by the Filter (if non-nil). func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) { // Check for empty set. @@ -293,13 +282,13 @@ func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (S }, nil } -// filterSet reorders `kvs` so that included keys are contiguous at -// the end of the slice, while excluded keys precede the included keys. +// filterSet reorders kvs so that included keys are contiguous at the end of +// the slice, while excluded keys precede the included keys. func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) { var excluded []KeyValue - // Move labels that do not match the filter so - // they're adjacent before calling computeDistinct(). + // Move attributes that do not match the filter so they're adjacent before + // calling computeDistinct(). distinctPosition := len(kvs) // Swap indistinct keys forward and distinct keys toward the @@ -319,8 +308,8 @@ func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) { }, excluded } -// Filter returns a filtered copy of this `Set`. See the -// documentation for `NewSetWithSortableFiltered` for more details. +// Filter returns a filtered copy of this Set. See the documentation for +// NewSetWithSortableFiltered for more details. 
func (l *Set) Filter(re Filter) (Set, []KeyValue) { if re == nil { return Set{ @@ -333,9 +322,9 @@ func (l *Set) Filter(re Filter) (Set, []KeyValue) { return filterSet(l.ToSlice(), re) } -// computeDistinct returns a `Distinct` using either the fixed- or -// reflect-oriented code path, depending on the size of the input. -// The input slice is assumed to already be sorted and de-duplicated. +// computeDistinct returns a Distinct using either the fixed- or +// reflect-oriented code path, depending on the size of the input. The input +// slice is assumed to already be sorted and de-duplicated. func computeDistinct(kvs []KeyValue) Distinct { iface := computeDistinctFixed(kvs) if iface == nil { @@ -346,8 +335,8 @@ func computeDistinct(kvs []KeyValue) Distinct { } } -// computeDistinctFixed computes a `Distinct` for small slices. It -// returns nil if the input is too large for this code path. +// computeDistinctFixed computes a Distinct for small slices. It returns nil +// if the input is too large for this code path. func computeDistinctFixed(kvs []KeyValue) interface{} { switch len(kvs) { case 1: @@ -395,8 +384,8 @@ func computeDistinctFixed(kvs []KeyValue) interface{} { } } -// computeDistinctReflect computes a `Distinct` using reflection, -// works for any size input. +// computeDistinctReflect computes a Distinct using reflection, works for any +// size input. func computeDistinctReflect(kvs []KeyValue) interface{} { at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem() for i, keyValue := range kvs { @@ -405,7 +394,7 @@ func computeDistinctReflect(kvs []KeyValue) interface{} { return at.Interface() } -// MarshalJSON returns the JSON encoding of the `*Set`. +// MarshalJSON returns the JSON encoding of the Set. func (l *Set) MarshalJSON() ([]byte, error) { return json.Marshal(l.equivalent.iface) } @@ -419,17 +408,17 @@ func (l Set) MarshalLog() interface{} { return kvs } -// Len implements `sort.Interface`. +// Len implements sort.Interface. func (l *Sortable) Len() int { return len(*l) } -// Swap implements `sort.Interface`. +// Swap implements sort.Interface. func (l *Sortable) Swap(i, j int) { (*l)[i], (*l)[j] = (*l)[j], (*l)[i] } -// Less implements `sort.Interface`. +// Less implements sort.Interface. func (l *Sortable) Less(i, j int) bool { return (*l)[i].Key < (*l)[j].Key } diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go index 6ec5cb290dfa4..34a4e548dd1d5 100644 --- a/vendor/go.opentelemetry.io/otel/attribute/value.go +++ b/vendor/go.opentelemetry.io/otel/attribute/value.go @@ -17,15 +17,17 @@ package attribute // import "go.opentelemetry.io/otel/attribute" import ( "encoding/json" "fmt" + "reflect" "strconv" "go.opentelemetry.io/otel/internal" + "go.opentelemetry.io/otel/internal/attribute" ) //go:generate stringer -type=Type // Type describes the type of the data Value holds. -type Type int +type Type int // nolint: revive // redefines builtin Type. // Value represents the value part in key-value pairs. type Value struct { @@ -66,12 +68,7 @@ func BoolValue(v bool) Value { // BoolSliceValue creates a BOOLSLICE Value. func BoolSliceValue(v []bool) Value { - cp := make([]bool, len(v)) - copy(cp, v) - return Value{ - vtype: BOOLSLICE, - slice: &cp, - } + return Value{vtype: BOOLSLICE, slice: attribute.SliceValue(v)} } // IntValue creates an INT64 Value. @@ -81,13 +78,14 @@ func IntValue(v int) Value { // IntSliceValue creates an INTSLICE Value. 
func IntSliceValue(v []int) Value { - cp := make([]int64, 0, len(v)) - for _, i := range v { - cp = append(cp, int64(i)) + var int64Val int64 + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val))) + for i, val := range v { + cp.Elem().Index(i).SetInt(int64(val)) } return Value{ vtype: INT64SLICE, - slice: &cp, + slice: cp.Elem().Interface(), } } @@ -101,12 +99,7 @@ func Int64Value(v int64) Value { // Int64SliceValue creates an INT64SLICE Value. func Int64SliceValue(v []int64) Value { - cp := make([]int64, len(v)) - copy(cp, v) - return Value{ - vtype: INT64SLICE, - slice: &cp, - } + return Value{vtype: INT64SLICE, slice: attribute.SliceValue(v)} } // Float64Value creates a FLOAT64 Value. @@ -119,12 +112,7 @@ func Float64Value(v float64) Value { // Float64SliceValue creates a FLOAT64SLICE Value. func Float64SliceValue(v []float64) Value { - cp := make([]float64, len(v)) - copy(cp, v) - return Value{ - vtype: FLOAT64SLICE, - slice: &cp, - } + return Value{vtype: FLOAT64SLICE, slice: attribute.SliceValue(v)} } // StringValue creates a STRING Value. @@ -137,12 +125,7 @@ func StringValue(v string) Value { // StringSliceValue creates a STRINGSLICE Value. func StringSliceValue(v []string) Value { - cp := make([]string, len(v)) - copy(cp, v) - return Value{ - vtype: STRINGSLICE, - slice: &cp, - } + return Value{vtype: STRINGSLICE, slice: attribute.SliceValue(v)} } // Type returns a type of the Value. @@ -159,10 +142,14 @@ func (v Value) AsBool() bool { // AsBoolSlice returns the []bool value. Make sure that the Value's type is // BOOLSLICE. func (v Value) AsBoolSlice() []bool { - if s, ok := v.slice.(*[]bool); ok { - return *s + if v.vtype != BOOLSLICE { + return nil } - return nil + return v.asBoolSlice() +} + +func (v Value) asBoolSlice() []bool { + return attribute.AsSlice[bool](v.slice) } // AsInt64 returns the int64 value. Make sure that the Value's type is @@ -174,10 +161,14 @@ func (v Value) AsInt64() int64 { // AsInt64Slice returns the []int64 value. Make sure that the Value's type is // INT64SLICE. func (v Value) AsInt64Slice() []int64 { - if s, ok := v.slice.(*[]int64); ok { - return *s + if v.vtype != INT64SLICE { + return nil } - return nil + return v.asInt64Slice() +} + +func (v Value) asInt64Slice() []int64 { + return attribute.AsSlice[int64](v.slice) } // AsFloat64 returns the float64 value. Make sure that the Value's @@ -189,10 +180,14 @@ func (v Value) AsFloat64() float64 { // AsFloat64Slice returns the []float64 value. Make sure that the Value's type is // FLOAT64SLICE. func (v Value) AsFloat64Slice() []float64 { - if s, ok := v.slice.(*[]float64); ok { - return *s + if v.vtype != FLOAT64SLICE { + return nil } - return nil + return v.asFloat64Slice() +} + +func (v Value) asFloat64Slice() []float64 { + return attribute.AsSlice[float64](v.slice) } // AsString returns the string value. Make sure that the Value's type @@ -204,10 +199,14 @@ func (v Value) AsString() string { // AsStringSlice returns the []string value. Make sure that the Value's type is // STRINGSLICE. 
func (v Value) AsStringSlice() []string { - if s, ok := v.slice.(*[]string); ok { - return *s + if v.vtype != STRINGSLICE { + return nil } - return nil + return v.asStringSlice() +} + +func (v Value) asStringSlice() []string { + return attribute.AsSlice[string](v.slice) } type unknownValueType struct{} @@ -218,19 +217,19 @@ func (v Value) AsInterface() interface{} { case BOOL: return v.AsBool() case BOOLSLICE: - return v.AsBoolSlice() + return v.asBoolSlice() case INT64: return v.AsInt64() case INT64SLICE: - return v.AsInt64Slice() + return v.asInt64Slice() case FLOAT64: return v.AsFloat64() case FLOAT64SLICE: - return v.AsFloat64Slice() + return v.asFloat64Slice() case STRING: return v.stringly case STRINGSLICE: - return v.AsStringSlice() + return v.asStringSlice() } return unknownValueType{} } @@ -239,19 +238,19 @@ func (v Value) AsInterface() interface{} { func (v Value) Emit() string { switch v.Type() { case BOOLSLICE: - return fmt.Sprint(*(v.slice.(*[]bool))) + return fmt.Sprint(v.asBoolSlice()) case BOOL: return strconv.FormatBool(v.AsBool()) case INT64SLICE: - return fmt.Sprint(*(v.slice.(*[]int64))) + return fmt.Sprint(v.asInt64Slice()) case INT64: return strconv.FormatInt(v.AsInt64(), 10) case FLOAT64SLICE: - return fmt.Sprint(*(v.slice.(*[]float64))) + return fmt.Sprint(v.asFloat64Slice()) case FLOAT64: return fmt.Sprint(v.AsFloat64()) case STRINGSLICE: - return fmt.Sprint(*(v.slice.(*[]string))) + return fmt.Sprint(v.asStringSlice()) case STRING: return v.stringly default: diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go index 824c67b27a32e..a36db8f8d85d7 100644 --- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go +++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go @@ -68,6 +68,9 @@ type Property struct { hasData bool } +// NewKeyProperty returns a new Property for key. +// +// If key is invalid, an error will be returned. func NewKeyProperty(key string) (Property, error) { if !keyRe.MatchString(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) @@ -77,6 +80,9 @@ func NewKeyProperty(key string) (Property, error) { return p, nil } +// NewKeyValueProperty returns a new Property for key with value. +// +// If key or value are invalid, an error will be returned. func NewKeyValueProperty(key, value string) (Property, error) { if !keyRe.MatchString(key) { return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) @@ -151,7 +157,7 @@ func (p Property) Key() string { return p.key } -// Value returns the Property value. Additionally a boolean value is returned +// Value returns the Property value. Additionally, a boolean value is returned // indicating if the returned value is the empty if the Property has a value // that is empty or if the value is not set. func (p Property) Value() (string, bool) { @@ -244,8 +250,9 @@ type Member struct { hasData bool } -// NewMember returns a new Member from the passed arguments. An error is -// returned if the created Member would be invalid according to the W3C +// NewMember returns a new Member from the passed arguments. The key will be +// used directly while the value will be url decoded after validation. An error +// is returned if the created Member would be invalid according to the W3C // Baggage specification. 
func NewMember(key, value string, props ...Property) (Member, error) { m := Member{ @@ -257,7 +264,11 @@ func NewMember(key, value string, props ...Property) (Member, error) { if err := m.validate(); err != nil { return newInvalidMember(), err } - + decodedValue, err := url.QueryUnescape(value) + if err != nil { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + m.value = decodedValue return m, nil } @@ -322,8 +333,9 @@ func parseMember(member string) (Member, error) { return Member{key: key, value: value, properties: props, hasData: true}, nil } -// validate ensures m conforms to the W3C Baggage specification, returning an -// error otherwise. +// validate ensures m conforms to the W3C Baggage specification. +// A key is just an ASCII string, but a value must be URL encoded UTF-8, +// returning an error otherwise. func (m Member) validate() error { if !m.hasData { return fmt.Errorf("%w: %q", errInvalidMember, m) @@ -386,7 +398,7 @@ func New(members ...Member) (Baggage, error) { } } - // Check member numbers after deduplicating. + // Check member numbers after deduplication. if len(b) > maxMembers { return Baggage{}, errMemberNumber } @@ -448,7 +460,7 @@ func Parse(bStr string) (Baggage, error) { func (b Baggage) Member(key string) Member { v, ok := b.list[key] if !ok { - // We do not need to worry about distiguising between the situation + // We do not need to worry about distinguishing between the situation // where a zero-valued Member is included in the Baggage because a // zero-valued Member is invalid according to the W3C Baggage // specification (it has an empty key). @@ -459,6 +471,7 @@ func (b Baggage) Member(key string) Member { key: key, value: v.Value, properties: fromInternalProperties(v.Properties), + hasData: true, } } @@ -478,6 +491,7 @@ func (b Baggage) Members() []Member { key: k, value: v.Value, properties: fromInternalProperties(v.Properties), + hasData: true, }) } return members diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go index 064a9279fd14a..587ebae4e30e7 100644 --- a/vendor/go.opentelemetry.io/otel/codes/codes.go +++ b/vendor/go.opentelemetry.io/otel/codes/codes.go @@ -23,10 +23,20 @@ import ( const ( // Unset is the default status code. Unset Code = 0 + // Error indicates the operation contains an error. + // + // NOTE: The error code in OTLP is 2. + // The value of this enum is only relevant to the internals + // of the Go SDK. Error Code = 1 + // Ok indicates operation has been validated by an Application developers // or Operator to have completed successfully, or contain no error. + // + // NOTE: The Ok code in OTLP is 1. + // The value of this enum is only relevant to the internals + // of the Go SDK. Ok Code = 2 maxCode = 3 diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/config.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/config.go new file mode 100644 index 0000000000000..b3fd45d9d31b7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/config.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal contains common functionality for all OTLP exporters. +package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal" + +import ( + "fmt" + "path" + "strings" +) + +// CleanPath returns a path with all spaces trimmed and all redundancies removed. If urlPath is empty or cleaning it results in an empty string, defaultPath is returned instead. +func CleanPath(urlPath string, defaultPath string) string { + tmp := path.Clean(strings.TrimSpace(urlPath)) + if tmp == "." { + return defaultPath + } + if !path.IsAbs(tmp) { + tmp = fmt.Sprintf("/%s", tmp) + } + return tmp +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/envconfig/envconfig.go new file mode 100644 index 0000000000000..53ff3126b6e9f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/envconfig/envconfig.go @@ -0,0 +1,199 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/internal/envconfig" + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "net/url" + "strconv" + "strings" + "time" + + "go.opentelemetry.io/otel/internal/global" +) + +// ConfigFn is the generic function used to set a config. +type ConfigFn func(*EnvOptionsReader) + +// EnvOptionsReader reads the required environment variables. +type EnvOptionsReader struct { + GetEnv func(string) string + ReadFile func(string) ([]byte, error) + Namespace string +} + +// Apply runs every ConfigFn. +func (e *EnvOptionsReader) Apply(opts ...ConfigFn) { + for _, o := range opts { + o(e) + } +} + +// GetEnvValue gets an OTLP environment variable value of the specified key +// using the GetEnv function. +// This function prepends the OTLP specified namespace to all key lookups. +func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) { + v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key))) + return v, v != "" +} + +// WithString retrieves the specified config and passes it to ConfigFn as a string. +func WithString(n string, fn func(string)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + fn(v) + } + } +} + +// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn. 
+func WithBool(n string, fn func(bool)) ConfigFn { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + b := strings.ToLower(v) == "true" + fn(b) + } + } +} + +// WithDuration retrieves the specified config and passes it to ConfigFn as a duration. +func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + d, err := strconv.Atoi(v) + if err != nil { + global.Error(err, "parse duration", "input", v) + return + } + fn(time.Duration(d) * time.Millisecond) + } + } +} + +// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers. +func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + fn(stringToHeader(v)) + } + } +} + +// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL. +func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + u, err := url.Parse(v) + if err != nil { + global.Error(err, "parse url", "input", v) + return + } + fn(u) + } + } +} + +// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn. +func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn { + return func(e *EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + b, err := e.ReadFile(v) + if err != nil { + global.Error(err, "read tls ca cert file", "file", v) + return + } + c, err := createCertPool(b) + if err != nil { + global.Error(err, "create tls cert pool") + return + } + fn(c) + } + } +} + +// WithClientCert returns a ConfigFn that reads the environment variable nc and nk as filepaths to a client certificate and key pair. If they exists, they are parsed as a crypto/tls.Certificate and it is passed to fn. 
+func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn { + return func(e *EnvOptionsReader) { + vc, okc := e.GetEnvValue(nc) + vk, okk := e.GetEnvValue(nk) + if !okc || !okk { + return + } + cert, err := e.ReadFile(vc) + if err != nil { + global.Error(err, "read tls client cert", "file", vc) + return + } + key, err := e.ReadFile(vk) + if err != nil { + global.Error(err, "read tls client key", "file", vk) + return + } + crt, err := tls.X509KeyPair(cert, key) + if err != nil { + global.Error(err, "create tls client key pair") + return + } + fn(crt) + } +} + +func keyWithNamespace(ns, key string) string { + if ns == "" { + return key + } + return fmt.Sprintf("%s_%s", ns, key) +} + +func stringToHeader(value string) map[string]string { + headersPairs := strings.Split(value, ",") + headers := make(map[string]string) + + for _, header := range headersPairs { + nameValue := strings.SplitN(header, "=", 2) + if len(nameValue) < 2 { + global.Error(errors.New("missing '="), "parse headers", "input", nameValue) + continue + } + name, err := url.QueryUnescape(nameValue[0]) + if err != nil { + global.Error(err, "escape header key", "key", nameValue[0]) + continue + } + trimmedName := strings.TrimSpace(name) + value, err := url.QueryUnescape(nameValue[1]) + if err != nil { + global.Error(err, "escape header value", "value", nameValue[1]) + continue + } + trimmedValue := strings.TrimSpace(value) + + headers[trimmedName] = trimmedValue + } + + return headers +} + +func createCertPool(certBytes []byte) (*x509.CertPool, error) { + cp := x509.NewCertPool() + if ok := cp.AppendCertsFromPEM(certBytes); !ok { + return nil, errors.New("failed to append certificate to the cert pool") + } + return cp, nil +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/header.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/header.go new file mode 100644 index 0000000000000..9aa62ed9e8e9c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/header.go @@ -0,0 +1,24 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal contains common functionality for all OTLP exporters. 
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal" + +import "go.opentelemetry.io/otel" + +// GetUserAgentHeader return an OTLP header value form "OTel OTLP Exporter Go/{{ .Version }}" +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#user-agent +func GetUserAgentHeader() string { + return "OTel OTLP Exporter Go/" + otel.Version() +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/partialsuccess.go new file mode 100644 index 0000000000000..9ab89b375747c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/partialsuccess.go @@ -0,0 +1,64 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal" + +import "fmt" + +// PartialSuccess represents the underlying error for all handling +// OTLP partial success messages. Use `errors.Is(err, +// PartialSuccess{})` to test whether an error passed to the OTel +// error handler belongs to this category. +type PartialSuccess struct { + ErrorMessage string + RejectedItems int64 + RejectedKind string +} + +var _ error = PartialSuccess{} + +// Error implements the error interface. +func (ps PartialSuccess) Error() string { + msg := ps.ErrorMessage + if msg == "" { + msg = "empty message" + } + return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind) +} + +// Is supports the errors.Is() interface. +func (ps PartialSuccess) Is(err error) bool { + _, ok := err.(PartialSuccess) + return ok +} + +// TracePartialSuccessError returns an error describing a partial success +// response for the trace signal. +func TracePartialSuccessError(itemsRejected int64, errorMessage string) error { + return PartialSuccess{ + ErrorMessage: errorMessage, + RejectedItems: itemsRejected, + RejectedKind: "spans", + } +} + +// MetricPartialSuccessError returns an error describing a partial success +// response for the metric signal. 
+func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error { + return PartialSuccess{ + ErrorMessage: errorMessage, + RejectedItems: itemsRejected, + RejectedKind: "metric data points", + } +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go index 3d43f7aea97dd..2d14d248336ba 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/retry/retry.go @@ -119,8 +119,8 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { delay = throttle } - if err := waitFunc(ctx, delay); err != nil { - return err + if ctxErr := waitFunc(ctx, delay); ctxErr != nil { + return fmt.Errorf("%w: %s", ctxErr, err) } } } @@ -129,6 +129,9 @@ func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc { // Allow override for testing. var waitFunc = wait +// wait takes the caller's context, and the amount of time to wait. It will +// return nil if the timer fires before or at the same time as the context's +// deadline. This indicates that the call can be retried. func wait(ctx context.Context, delay time.Duration) error { timer := time.NewTimer(delay) defer timer.Stop() diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/wrappederror.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/wrappederror.go new file mode 100644 index 0000000000000..217751da552f9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/internal/wrappederror.go @@ -0,0 +1,61 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opentelemetry.io/otel/exporters/otlp/internal" + +// ErrorKind is used to identify the kind of export error +// being wrapped. +type ErrorKind int + +const ( + // TracesExport indicates the error comes from the OTLP trace exporter. + TracesExport ErrorKind = iota +) + +// prefix returns a prefix for the Error() string. +func (k ErrorKind) prefix() string { + switch k { + case TracesExport: + return "traces export: " + default: + return "unknown: " + } +} + +// wrappedExportError wraps an OTLP exporter error with the kind of +// signal that produced it. +type wrappedExportError struct { + wrap error + kind ErrorKind +} + +// WrapTracesError wraps an error from the OTLP exporter for traces. +func WrapTracesError(err error) error { + return wrappedExportError{ + wrap: err, + kind: TracesExport, + } +} + +var _ error = wrappedExportError{} + +// Error attaches a prefix corresponding to the kind of exporter. +func (t wrappedExportError) Error() string { + return t.kind.prefix() + t.wrap.Error() +} + +// Unwrap returns the wrapped error. 
+func (t wrappedExportError) Unwrap() error { + return t.wrap +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md index 8a40a86a246fc..6e9cc0366ba2b 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md @@ -12,8 +12,8 @@ go get -u go.opentelemetry.io/otel/exporters/otlp/otlptrace ## Examples -- [Exporter setup and examples](./otlptracehttp/example_test.go) -- [Full example sending telemetry to a local collector](../../../example/otel-collector) +- [HTTP Exporter setup and examples](./otlptracehttp/example_test.go) +- [Full example of gRPC Exporter sending telemetry to a local collector](../../../example/otel-collector) ## [`otlptrace`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace) @@ -38,12 +38,14 @@ override the default configuration. For more information about how each of these environment variables is interpreted, see [the OpenTelemetry specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.8.0/specification/protocol/exporter.md). -| Environment variable | Option | Default value | -| ------------------------------------------------------------------------ |------------------------------ | ----------------------------------- | -| `OTEL_EXPORTER_OTLP_ENDPOINT` `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` | `WithEndpoint` `WithInsecure` | `https://localhost:4317` | -| `OTEL_EXPORTER_OTLP_CERTIFICATE` `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE` | `WithTLSClientConfig` | | -| `OTEL_EXPORTER_OTLP_HEADERS` `OTEL_EXPORTER_OTLP_TRACES_HEADERS` | `WithHeaders` | | -| `OTEL_EXPORTER_OTLP_COMPRESSION` `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION` | `WithCompression` | | -| `OTEL_EXPORTER_OTLP_TIMEOUT` `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT` | `WithTimeout` | `10s` | +| Environment variable | Option | Default value | +| ------------------------------------------------------------------------ |------------------------------ | -------------------------------------------------------- | +| `OTEL_EXPORTER_OTLP_ENDPOINT` `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` | `WithEndpoint` `WithInsecure` | `https://localhost:4317` or `https://localhost:4318`[^1] | +| `OTEL_EXPORTER_OTLP_CERTIFICATE` `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE` | `WithTLSClientConfig` | | +| `OTEL_EXPORTER_OTLP_HEADERS` `OTEL_EXPORTER_OTLP_TRACES_HEADERS` | `WithHeaders` | | +| `OTEL_EXPORTER_OTLP_COMPRESSION` `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION` | `WithCompression` | | +| `OTEL_EXPORTER_OTLP_TIMEOUT` `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT` | `WithTimeout` | `10s` | + +[^1]: The gRPC client defaults to `https://localhost:4317` and the HTTP client `https://localhost:4318`. Configuration using options have precedence over the environment variables. 
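The README change above splits the default endpoint per transport (gRPC on 4317, HTTP on 4318) and notes that programmatic options win over the OTEL_EXPORTER_OTLP_* environment variables. A minimal sketch of what that means for callers of the vendored exporters follows; the collector hostname is made up for illustration, and only constructors and options that appear in these packages (otlptracegrpc.New, otlptracehttp.New, WithEndpoint, WithInsecure) are used.

package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
)

func main() {
	ctx := context.Background()

	// With no options and no OTEL_EXPORTER_OTLP_* variables set, the gRPC
	// exporter targets localhost:4317.
	grpcExp, err := otlptracegrpc.New(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = grpcExp.Shutdown(ctx) }()

	// The HTTP exporter defaults to localhost:4318; an explicit WithEndpoint
	// overrides both the default and any environment configuration.
	// "collector.example.internal" is a hypothetical host.
	httpExp, err := otlptracehttp.New(ctx,
		otlptracehttp.WithEndpoint("collector.example.internal:4318"),
		otlptracehttp.WithInsecure(),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = httpExp.Shutdown(ctx) }()
}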
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go index 7e9bb6c47ae8f..b65802edbbdf0 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go @@ -19,6 +19,7 @@ import ( "errors" "sync" + "go.opentelemetry.io/otel/exporters/otlp/internal" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" tracesdk "go.opentelemetry.io/otel/sdk/trace" ) @@ -45,7 +46,11 @@ func (e *Exporter) ExportSpans(ctx context.Context, ss []tracesdk.ReadOnlySpan) return nil } - return e.client.UploadTraces(ctx, protoSpans) + err := e.client.UploadTraces(ctx, protoSpans) + if err != nil { + return internal.WrapTracesError(err) + } + return nil } // Start establishes a connection to the receiving endpoint. @@ -100,3 +105,14 @@ func NewUnstarted(client Client) *Exporter { client: client, } } + +// MarshalLog is the marshaling function used by the logging system to represent this exporter. +func (e *Exporter) MarshalLog() interface{} { + return struct { + Type string + Client Client + }{ + Type: "otlptrace", + Client: e.client, + } +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go index 77f13a1937b63..62c5029db2ac2 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/envconfig.go @@ -16,66 +16,59 @@ package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/ import ( "crypto/tls" - "fmt" - "io/ioutil" + "crypto/x509" "net/url" "os" "path" - "strconv" "strings" "time" - "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/internal/envconfig" ) -var DefaultEnvOptionsReader = EnvOptionsReader{ - GetEnv: os.Getenv, - ReadFile: ioutil.ReadFile, +// DefaultEnvOptionsReader is the default environments reader. +var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{ + GetEnv: os.Getenv, + ReadFile: os.ReadFile, + Namespace: "OTEL_EXPORTER_OTLP", } +// ApplyGRPCEnvConfigs applies the env configurations for gRPC. func ApplyGRPCEnvConfigs(cfg Config) Config { - return DefaultEnvOptionsReader.ApplyGRPCEnvConfigs(cfg) -} - -func ApplyHTTPEnvConfigs(cfg Config) Config { - return DefaultEnvOptionsReader.ApplyHTTPEnvConfigs(cfg) -} - -type EnvOptionsReader struct { - GetEnv func(string) string - ReadFile func(filename string) ([]byte, error) -} - -func (e *EnvOptionsReader) ApplyHTTPEnvConfigs(cfg Config) Config { - opts := e.GetOptionsFromEnv() + opts := getOptionsFromEnv() for _, opt := range opts { - cfg = opt.ApplyHTTPOption(cfg) + cfg = opt.ApplyGRPCOption(cfg) } return cfg } -func (e *EnvOptionsReader) ApplyGRPCEnvConfigs(cfg Config) Config { - opts := e.GetOptionsFromEnv() +// ApplyHTTPEnvConfigs applies the env configurations for HTTP. 
+func ApplyHTTPEnvConfigs(cfg Config) Config { + opts := getOptionsFromEnv() for _, opt := range opts { - cfg = opt.ApplyGRPCOption(cfg) + cfg = opt.ApplyHTTPOption(cfg) } return cfg } -func (e *EnvOptionsReader) GetOptionsFromEnv() []GenericOption { - var opts []GenericOption +func getOptionsFromEnv() []GenericOption { + opts := []GenericOption{} - // Endpoint - if v, ok := e.getEnvValue("TRACES_ENDPOINT"); ok { - u, err := url.Parse(v) - // Ignore invalid values. - if err == nil { - // This is used to set the scheme for OTLP/HTTP. - if insecureSchema(u.Scheme) { - opts = append(opts, WithInsecure()) - } else { - opts = append(opts, WithSecure()) - } + tlsConf := &tls.Config{} + DefaultEnvOptionsReader.Apply( + envconfig.WithURL("ENDPOINT", func(u *url.URL) { + opts = append(opts, withEndpointScheme(u)) + opts = append(opts, newSplitOption(func(cfg Config) Config { + cfg.Traces.Endpoint = u.Host + // For OTLP/HTTP endpoint URLs without a per-signal + // configuration, the passed endpoint is used as a base URL + // and the signals are sent to these paths relative to that. + cfg.Traces.URLPath = path.Join(u.Path, DefaultTracesPath) + return cfg + }, withEndpointForGRPC(u))) + }), + envconfig.WithURL("TRACES_ENDPOINT", func(u *url.URL) { + opts = append(opts, withEndpointScheme(u)) opts = append(opts, newSplitOption(func(cfg Config) Config { cfg.Traces.Endpoint = u.Host // For endpoint URLs for OTLP/HTTP per-signal variables, the @@ -88,140 +81,70 @@ func (e *EnvOptionsReader) GetOptionsFromEnv() []GenericOption { } cfg.Traces.URLPath = path return cfg - }, func(cfg Config) Config { - // For OTLP/gRPC endpoints, this is the target to which the - // exporter is going to send telemetry. - cfg.Traces.Endpoint = path.Join(u.Host, u.Path) - return cfg - })) - } - } else if v, ok = e.getEnvValue("ENDPOINT"); ok { - u, err := url.Parse(v) - // Ignore invalid values. - if err == nil { - // This is used to set the scheme for OTLP/HTTP. - if insecureSchema(u.Scheme) { - opts = append(opts, WithInsecure()) - } else { - opts = append(opts, WithSecure()) - } - opts = append(opts, newSplitOption(func(cfg Config) Config { - cfg.Traces.Endpoint = u.Host - // For OTLP/HTTP endpoint URLs without a per-signal - // configuration, the passed endpoint is used as a base URL - // and the signals are sent to these paths relative to that. - cfg.Traces.URLPath = path.Join(u.Path, DefaultTracesPath) - return cfg - }, func(cfg Config) Config { - // For OTLP/gRPC endpoints, this is the target to which the - // exporter is going to send telemetry. 
- cfg.Traces.Endpoint = path.Join(u.Host, u.Path) - return cfg - })) - } - } - - // Certificate File - if path, ok := e.getEnvValue("CERTIFICATE"); ok { - if tls, err := e.readTLSConfig(path); err == nil { - opts = append(opts, WithTLSClientConfig(tls)) - } else { - otel.Handle(fmt.Errorf("failed to configure otlp exporter certificate '%s': %w", path, err)) - } - } - if path, ok := e.getEnvValue("TRACES_CERTIFICATE"); ok { - if tls, err := e.readTLSConfig(path); err == nil { - opts = append(opts, WithTLSClientConfig(tls)) - } else { - otel.Handle(fmt.Errorf("failed to configure otlp traces exporter certificate '%s': %w", path, err)) - } - } - - // Headers - if h, ok := e.getEnvValue("HEADERS"); ok { - opts = append(opts, WithHeaders(stringToHeader(h))) - } - if h, ok := e.getEnvValue("TRACES_HEADERS"); ok { - opts = append(opts, WithHeaders(stringToHeader(h))) - } - - // Compression - if c, ok := e.getEnvValue("COMPRESSION"); ok { - opts = append(opts, WithCompression(stringToCompression(c))) - } - if c, ok := e.getEnvValue("TRACES_COMPRESSION"); ok { - opts = append(opts, WithCompression(stringToCompression(c))) - } - // Timeout - if t, ok := e.getEnvValue("TIMEOUT"); ok { - if d, err := strconv.Atoi(t); err == nil { - opts = append(opts, WithTimeout(time.Duration(d)*time.Millisecond)) - } - } - if t, ok := e.getEnvValue("TRACES_TIMEOUT"); ok { - if d, err := strconv.Atoi(t); err == nil { - opts = append(opts, WithTimeout(time.Duration(d)*time.Millisecond)) - } - } + }, withEndpointForGRPC(u))) + }), + envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), + envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }), + envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }), + withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }), + envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), + envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }), + envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), + envconfig.WithHeaders("TRACES_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }), + WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), + WithEnvCompression("TRACES_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }), + envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), + envconfig.WithDuration("TRACES_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }), + ) return opts } -func insecureSchema(schema string) bool { - switch strings.ToLower(schema) { +func withEndpointScheme(u *url.URL) GenericOption { + switch strings.ToLower(u.Scheme) { case "http", "unix": - return true + return WithInsecure() default: - return false + return WithSecure() } } -// getEnvValue gets an OTLP environment variable value of the specified key using the GetEnv function. -// This function already prepends the OTLP prefix to all key lookup. 
-func (e *EnvOptionsReader) getEnvValue(key string) (string, bool) { - v := strings.TrimSpace(e.GetEnv(fmt.Sprintf("OTEL_EXPORTER_OTLP_%s", key))) - return v, v != "" +func withEndpointForGRPC(u *url.URL) func(cfg Config) Config { + return func(cfg Config) Config { + // For OTLP/gRPC endpoints, this is the target to which the + // exporter is going to send telemetry. + cfg.Traces.Endpoint = path.Join(u.Host, u.Path) + return cfg + } } -func (e *EnvOptionsReader) readTLSConfig(path string) (*tls.Config, error) { - b, err := e.ReadFile(path) - if err != nil { - return nil, err +// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression. +func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) { + return func(e *envconfig.EnvOptionsReader) { + if v, ok := e.GetEnvValue(n); ok { + cp := NoCompression + if v == "gzip" { + cp = GzipCompression + } + + fn(cp) + } } - return CreateTLSConfig(b) } -func stringToCompression(value string) Compression { - switch value { - case "gzip": - return GzipCompression +// revive:disable-next-line:flag-parameter +func withInsecure(b bool) GenericOption { + if b { + return WithInsecure() } - - return NoCompression + return WithSecure() } -func stringToHeader(value string) map[string]string { - headersPairs := strings.Split(value, ",") - headers := make(map[string]string) - - for _, header := range headersPairs { - nameValue := strings.SplitN(header, "=", 2) - if len(nameValue) < 2 { - continue - } - name, err := url.QueryUnescape(nameValue[0]) - if err != nil { - continue +func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) { + return func(e *envconfig.EnvOptionsReader) { + if c.RootCAs != nil || len(c.Certificates) > 0 { + fn(c) } - trimmedName := strings.TrimSpace(name) - value, err := url.QueryUnescape(nameValue[1]) - if err != nil { - continue - } - trimmedValue := strings.TrimSpace(value) - - headers[trimmedName] = trimmedValue } - - return headers } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go index e6fb14e00e8de..c48ffd530814d 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/options.go @@ -25,6 +25,7 @@ import ( "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/encoding/gzip" + "go.opentelemetry.io/otel/exporters/otlp/internal" "go.opentelemetry.io/otel/exporters/otlp/internal/retry" ) @@ -65,24 +66,39 @@ type ( } ) -func NewDefaultConfig() Config { - c := Config{ +// NewHTTPConfig returns a new Config with all settings applied from opts and +// any unset setting using the default HTTP config values. 
+func NewHTTPConfig(opts ...HTTPOption) Config { + cfg := Config{ Traces: SignalConfig{ - Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorPort), + Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort), URLPath: DefaultTracesPath, Compression: NoCompression, Timeout: DefaultTimeout, }, RetryConfig: retry.DefaultConfig, } - - return c + cfg = ApplyHTTPEnvConfigs(cfg) + for _, opt := range opts { + cfg = opt.ApplyHTTPOption(cfg) + } + cfg.Traces.URLPath = internal.CleanPath(cfg.Traces.URLPath, DefaultTracesPath) + return cfg } // NewGRPCConfig returns a new Config with all settings applied from opts and // any unset setting using the default gRPC config values. func NewGRPCConfig(opts ...GRPCOption) Config { - cfg := NewDefaultConfig() + cfg := Config{ + Traces: SignalConfig{ + Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort), + URLPath: DefaultTracesPath, + Compression: NoCompression, + Timeout: DefaultTimeout, + }, + RetryConfig: retry.DefaultConfig, + DialOptions: []grpc.DialOption{grpc.WithUserAgent(internal.GetUserAgentHeader())}, + } cfg = ApplyGRPCEnvConfigs(cfg) for _, opt := range opts { cfg = opt.ApplyGRPCOption(cfg) diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/optiontypes.go index f69e31095d21b..c2d6c03615262 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/optiontypes.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig/optiontypes.go @@ -15,9 +15,10 @@ package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" const ( - // DefaultCollectorPort is the port the Exporter will attempt connect to - // if no collector port is provided. - DefaultCollectorPort uint16 = 4317 + // DefaultCollectorGRPCPort is the default gRPC port of the collector. + DefaultCollectorGRPCPort uint16 = 4317 + // DefaultCollectorHTTPPort is the default HTTP port of the collector. + DefaultCollectorHTTPPort uint16 = 4318 // DefaultCollectorHost is the host address the Exporter will attempt // connect to if no collector address is provided. DefaultCollectorHost string = "localhost" @@ -36,7 +37,7 @@ const ( GzipCompression ) -// Marshaler describes the kind of message format sent to the collector +// Marshaler describes the kind of message format sent to the collector. type Marshaler int const ( diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go index d9086a390de05..ec74f1aad7581 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go @@ -48,8 +48,8 @@ func Iterator(iter attribute.Iterator) []*commonpb.KeyValue { } // ResourceAttributes transforms a Resource OTLP key-values. -func ResourceAttributes(resource *resource.Resource) []*commonpb.KeyValue { - return Iterator(resource.Iter()) +func ResourceAttributes(res *resource.Resource) []*commonpb.KeyValue { + return Iterator(res.Iter()) } // KeyValue transforms an attribute KeyValue into an OTLP key-value. 
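NewHTTPConfig above normalises Traces.URLPath with the CleanPath helper added earlier in this patch (exporters/otlp/internal/config.go). That package is internal and cannot be imported from outside the otel module, so the following is a self-contained re-statement of the same behaviour, not the vendored function itself.

package main

import (
	"fmt"
	"path"
	"strings"
)

// cleanPath mirrors the semantics of internal.CleanPath: trim whitespace,
// collapse redundancies, fall back to defaultPath for empty input, and force
// the result to be absolute.
func cleanPath(urlPath, defaultPath string) string {
	tmp := path.Clean(strings.TrimSpace(urlPath))
	if tmp == "." {
		// Empty or whitespace-only input falls back to the default path.
		return defaultPath
	}
	if !path.IsAbs(tmp) {
		// Relative paths are made absolute.
		tmp = "/" + tmp
	}
	return tmp
}

func main() {
	fmt.Println(cleanPath("", "/v1/traces"))                 // /v1/traces
	fmt.Println(cleanPath("  v1/traces/ ", "/v1/traces"))    // /v1/traces
	fmt.Println(cleanPath("/api//v1/traces/", "/v1/traces")) // /api/v1/traces
}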
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go index 6246b17f578e7..7aaec38d22ae9 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go @@ -19,11 +19,11 @@ import ( commonpb "go.opentelemetry.io/proto/otlp/common/v1" ) -func InstrumentationLibrary(il instrumentation.Library) *commonpb.InstrumentationLibrary { - if il == (instrumentation.Library{}) { +func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationScope { + if il == (instrumentation.Scope{}) { return nil } - return &commonpb.InstrumentationLibrary{ + return &commonpb.InstrumentationScope{ Name: il.Name, Version: il.Version, } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go index 2f0f5eacb776d..b83cbd7247807 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go @@ -23,10 +23,6 @@ import ( tracepb "go.opentelemetry.io/proto/otlp/trace/v1" ) -const ( - maxEventsPerSpan = 128 -) - // Spans transforms a slice of OpenTelemetry spans into a slice of OTLP // ResourceSpans. func Spans(sdl []tracesdk.ReadOnlySpan) []*tracepb.ResourceSpans { @@ -36,11 +32,11 @@ func Spans(sdl []tracesdk.ReadOnlySpan) []*tracepb.ResourceSpans { rsm := make(map[attribute.Distinct]*tracepb.ResourceSpans) - type ilsKey struct { + type key struct { r attribute.Distinct - il instrumentation.Library + is instrumentation.Scope } - ilsm := make(map[ilsKey]*tracepb.InstrumentationLibrarySpans) + ssm := make(map[key]*tracepb.ScopeSpans) var resources int for _, sd := range sdl { @@ -49,30 +45,30 @@ func Spans(sdl []tracesdk.ReadOnlySpan) []*tracepb.ResourceSpans { } rKey := sd.Resource().Equivalent() - iKey := ilsKey{ + k := key{ r: rKey, - il: sd.InstrumentationLibrary(), + is: sd.InstrumentationScope(), } - ils, iOk := ilsm[iKey] + scopeSpan, iOk := ssm[k] if !iOk { - // Either the resource or instrumentation library were unknown. - ils = &tracepb.InstrumentationLibrarySpans{ - InstrumentationLibrary: InstrumentationLibrary(sd.InstrumentationLibrary()), - Spans: []*tracepb.Span{}, - SchemaUrl: sd.InstrumentationLibrary().SchemaURL, + // Either the resource or instrumentation scope were unknown. + scopeSpan = &tracepb.ScopeSpans{ + Scope: InstrumentationScope(sd.InstrumentationScope()), + Spans: []*tracepb.Span{}, + SchemaUrl: sd.InstrumentationScope().SchemaURL, } } - ils.Spans = append(ils.Spans, span(sd)) - ilsm[iKey] = ils + scopeSpan.Spans = append(scopeSpan.Spans, span(sd)) + ssm[k] = scopeSpan rs, rOk := rsm[rKey] if !rOk { resources++ // The resource was unknown. 
rs = &tracepb.ResourceSpans{ - Resource: Resource(sd.Resource()), - InstrumentationLibrarySpans: []*tracepb.InstrumentationLibrarySpans{ils}, - SchemaUrl: sd.Resource().SchemaURL(), + Resource: Resource(sd.Resource()), + ScopeSpans: []*tracepb.ScopeSpans{scopeSpan}, + SchemaUrl: sd.Resource().SchemaURL(), } rsm[rKey] = rs continue @@ -82,9 +78,9 @@ func Spans(sdl []tracesdk.ReadOnlySpan) []*tracepb.ResourceSpans { // library lookup was unknown because if so we need to add it to the // ResourceSpans. Otherwise, the instrumentation library has already // been seen and the append we did above will be included it in the - // InstrumentationLibrarySpans reference. + // ScopeSpans reference. if !iOk { - rs.InstrumentationLibrarySpans = append(rs.InstrumentationLibrarySpans, ils) + rs.ScopeSpans = append(rs.ScopeSpans, scopeSpan) } } @@ -162,9 +158,10 @@ func links(links []tracesdk.Link) []*tracepb.Span_Link { sid := otLink.SpanContext.SpanID() sl = append(sl, &tracepb.Span_Link{ - TraceId: tid[:], - SpanId: sid[:], - Attributes: KeyValues(otLink.Attributes), + TraceId: tid[:], + SpanId: sid[:], + Attributes: KeyValues(otLink.Attributes), + DroppedAttributesCount: uint32(otLink.DroppedAttributeCount), }) } return sl @@ -176,29 +173,16 @@ func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event { return nil } - evCount := len(es) - if evCount > maxEventsPerSpan { - evCount = maxEventsPerSpan - } - events := make([]*tracepb.Span_Event, 0, evCount) - nEvents := 0 - + events := make([]*tracepb.Span_Event, len(es)) // Transform message events - for _, e := range es { - if nEvents >= maxEventsPerSpan { - break + for i := 0; i < len(es); i++ { + events[i] = &tracepb.Span_Event{ + Name: es[i].Name, + TimeUnixNano: uint64(es[i].Time.UnixNano()), + Attributes: KeyValues(es[i].Attributes), + DroppedAttributesCount: uint32(es[i].DroppedAttributeCount), } - nEvents++ - events = append(events, - &tracepb.Span_Event{ - Name: e.Name, - TimeUnixNano: uint64(e.Time.UnixNano()), - Attributes: KeyValues(e.Attributes), - // TODO (rghetia) : Add Drop Counts when supported. - }, - ) } - return events } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go index d709ffa96e19b..fe23f8e3766f4 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go @@ -26,6 +26,8 @@ import ( "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/internal" "go.opentelemetry.io/otel/exporters/otlp/internal/retry" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" @@ -196,9 +198,17 @@ func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc defer cancel() return c.requestFunc(ctx, func(iCtx context.Context) error { - _, err := c.tsc.Export(iCtx, &coltracepb.ExportTraceServiceRequest{ + resp, err := c.tsc.Export(iCtx, &coltracepb.ExportTraceServiceRequest{ ResourceSpans: protoSpans, }) + if resp != nil && resp.PartialSuccess != nil { + msg := resp.PartialSuccess.GetErrorMessage() + n := resp.PartialSuccess.GetRejectedSpans() + if n != 0 || msg != "" { + err := internal.TracePartialSuccessError(n, msg) + otel.Handle(err) + } + } // nil is converted to OK. if status.Code(err) == codes.OK { // Success. 
@@ -265,11 +275,22 @@ func retryable(err error) (bool, time.Duration) { // throttleDelay returns a duration to wait for if an explicit throttle time // is included in the response status. -func throttleDelay(status *status.Status) time.Duration { - for _, detail := range status.Details() { +func throttleDelay(s *status.Status) time.Duration { + for _, detail := range s.Details() { if t, ok := detail.(*errdetails.RetryInfo); ok { return t.RetryDelay.AsDuration() } } return 0 } + +// MarshalLog is the marshaling function used by the logging system to represent this Client. +func (c *client) MarshalLog() interface{} { + return struct { + Type string + Endpoint string + }{ + Type: "otlphttpgrpc", + Endpoint: c.endpoint, + } +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go index e2e5bd696f63e..3d09ce590d0e7 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go @@ -84,8 +84,7 @@ func WithReconnectionPeriod(rp time.Duration) Option { } func compressorToCompression(compressor string) otlpconfig.Compression { - switch compressor { - case "gzip": + if compressor == "gzip" { return otlpconfig.GzipCompression } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go index 81487f9b6f99f..9fbe861717d44 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/client.go @@ -20,18 +20,17 @@ import ( "context" "fmt" "io" - "io/ioutil" "net" "net/http" "net/url" - "path" "strconv" - "strings" "sync" "time" "google.golang.org/protobuf/proto" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/internal" "go.opentelemetry.io/otel/exporters/otlp/internal/retry" "go.opentelemetry.io/otel/exporters/otlp/otlptrace" "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig" @@ -43,7 +42,7 @@ const contentTypeProto = "application/x-protobuf" var gzPool = sync.Pool{ New: func() interface{} { - w := gzip.NewWriter(ioutil.Discard) + w := gzip.NewWriter(io.Discard) return w }, } @@ -79,26 +78,7 @@ var _ otlptrace.Client = (*client)(nil) // NewClient creates a new HTTP trace client. func NewClient(opts ...Option) otlptrace.Client { - cfg := otlpconfig.NewDefaultConfig() - cfg = otlpconfig.ApplyHTTPEnvConfigs(cfg) - for _, opt := range opts { - cfg = opt.applyHTTPOption(cfg) - } - - for pathPtr, defaultPath := range map[*string]string{ - &cfg.Traces.URLPath: otlpconfig.DefaultTracesPath, - } { - tmp := strings.TrimSpace(*pathPtr) - if tmp == "" { - tmp = defaultPath - } else { - tmp = path.Clean(tmp) - if !path.IsAbs(tmp) { - tmp = fmt.Sprintf("/%s", tmp) - } - } - *pathPtr = tmp - } + cfg := otlpconfig.NewHTTPConfig(asHTTPOptions(opts)...) httpClient := &http.Client{ Transport: ourTransport, @@ -121,7 +101,7 @@ func NewClient(opts ...Option) otlptrace.Client { } } -// Start does nothing in a HTTP client +// Start does nothing in a HTTP client. 
func (d *client) Start(ctx context.Context) error { // nothing to do select { @@ -176,28 +156,49 @@ func (d *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc return err } - var rErr error + if resp != nil && resp.Body != nil { + defer func() { + if err := resp.Body.Close(); err != nil { + otel.Handle(err) + } + }() + } + switch resp.StatusCode { case http.StatusOK: // Success, do not retry. - case http.StatusTooManyRequests, - http.StatusServiceUnavailable: - // Retry-able failure. - rErr = newResponseError(resp.Header) - - // Going to retry, drain the body to reuse the connection. - if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { - _ = resp.Body.Close() + // Read the partial success message, if any. + var respData bytes.Buffer + if _, err := io.Copy(&respData, resp.Body); err != nil { return err } - default: - rErr = fmt.Errorf("failed to send %s to %s: %s", d.name, request.URL, resp.Status) - } - if err := resp.Body.Close(); err != nil { - return err + if respData.Len() != 0 { + var respProto coltracepb.ExportTraceServiceResponse + if err := proto.Unmarshal(respData.Bytes(), &respProto); err != nil { + return err + } + + if respProto.PartialSuccess != nil { + msg := respProto.PartialSuccess.GetErrorMessage() + n := respProto.PartialSuccess.GetRejectedSpans() + if n != 0 || msg != "" { + err := internal.TracePartialSuccessError(n, msg) + otel.Handle(err) + } + } + } + return nil + + case http.StatusTooManyRequests, http.StatusServiceUnavailable: + // Retry-able failures. Drain the body to reuse the connection. + if _, err := io.Copy(io.Discard, resp.Body); err != nil { + otel.Handle(err) + } + return newResponseError(resp.Header) + default: + return fmt.Errorf("failed to send to %s: %s", request.URL, resp.Status) } - return rErr }) } @@ -208,6 +209,8 @@ func (d *client) newRequest(body []byte) (request, error) { return request{Request: r}, err } + r.Header.Set("User-Agent", internal.GetUserAgentHeader()) + for k, v := range d.cfg.Headers { r.Header.Set(k, v) } @@ -243,10 +246,23 @@ func (d *client) newRequest(body []byte) (request, error) { return req, nil } +// MarshalLog is the marshaling function used by the logging system to represent this Client. +func (d *client) MarshalLog() interface{} { + return struct { + Type string + Endpoint string + Insecure bool + }{ + Type: "otlphttphttp", + Endpoint: d.cfg.Endpoint, + Insecure: d.cfg.Insecure, + } +} + // bodyReader returns a closure returning a new reader for buf. func bodyReader(buf []byte) func() io.ReadCloser { return func() io.ReadCloser { - return ioutil.NopCloser(bytes.NewReader(buf)) + return io.NopCloser(bytes.NewReader(buf)) } } diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go index e550cfb5d518e..4257ce3473f86 100644 --- a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/options.go @@ -40,6 +40,14 @@ type Option interface { applyHTTPOption(otlpconfig.Config) otlpconfig.Config } +func asHTTPOptions(opts []Option) []otlpconfig.HTTPOption { + converted := make([]otlpconfig.HTTPOption, len(opts)) + for i, o := range opts { + converted[i] = otlpconfig.NewHTTPOption(o.applyHTTPOption) + } + return converted +} + // RetryConfig defines configuration for retrying batches in case of export // failure using an exponential backoff. 
type RetryConfig retry.Config @@ -55,7 +63,7 @@ func (w wrappedOption) applyHTTPOption(cfg otlpconfig.Config) otlpconfig.Config // WithEndpoint allows one to set the address of the collector // endpoint that the driver will use to send spans. If // unset, it will instead try to use -// the default endpoint (localhost:4317). Note that the endpoint +// the default endpoint (localhost:4318). Note that the endpoint // must not contain any URL path. func WithEndpoint(endpoint string) Option { return wrappedOption{otlpconfig.WithEndpoint(endpoint)} diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go index 35263e01ac26d..ecd363ab51653 100644 --- a/vendor/go.opentelemetry.io/otel/handler.go +++ b/vendor/go.opentelemetry.io/otel/handler.go @@ -17,7 +17,8 @@ package otel // import "go.opentelemetry.io/otel" import ( "log" "os" - "sync" + "sync/atomic" + "unsafe" ) var ( @@ -34,29 +35,26 @@ var ( ) type delegator struct { - lock *sync.RWMutex - eh ErrorHandler + delegate unsafe.Pointer } func (d *delegator) Handle(err error) { - d.lock.RLock() - defer d.lock.RUnlock() - d.eh.Handle(err) + d.getDelegate().Handle(err) +} + +func (d *delegator) getDelegate() ErrorHandler { + return *(*ErrorHandler)(atomic.LoadPointer(&d.delegate)) } // setDelegate sets the ErrorHandler delegate. func (d *delegator) setDelegate(eh ErrorHandler) { - d.lock.Lock() - defer d.lock.Unlock() - d.eh = eh + atomic.StorePointer(&d.delegate, unsafe.Pointer(&eh)) } func defaultErrorHandler() *delegator { - return &delegator{ - lock: &sync.RWMutex{}, - eh: &errLogger{l: log.New(os.Stderr, "", log.LstdFlags)}, - } - + d := &delegator{} + d.setDelegate(&errLogger{l: log.New(os.Stderr, "", log.LstdFlags)}) + return d } // errLogger logs errors if no delegate is set, otherwise they are delegated. @@ -92,7 +90,7 @@ func SetErrorHandler(h ErrorHandler) { globalErrorHandler.setDelegate(h) } -// Handle is a convenience function for ErrorHandler().Handle(err) +// Handle is a convenience function for ErrorHandler().Handle(err). func Handle(err error) { GetErrorHandler().Handle(err) } diff --git a/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go new file mode 100644 index 0000000000000..2203489447358 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/attribute/attribute.go @@ -0,0 +1,45 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package attribute provide several helper functions for some commonly used +logic of processing attributes. +*/ +package attribute // import "go.opentelemetry.io/otel/internal/attribute" + +import ( + "reflect" +) + +// SliceValue convert a slice into an array with same elements as slice. 
+func SliceValue[T bool | int64 | float64 | string](v []T) any { + var zero T + cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))) + copy(cp.Elem().Slice(0, len(v)).Interface().([]T), v) + return cp.Elem().Interface() +} + +// AsSlice convert an array into a slice into with same elements as array. +func AsSlice[T bool | int64 | float64 | string](v any) []T { + rv := reflect.ValueOf(v) + if rv.Type().Kind() != reflect.Array { + return nil + } + var zero T + correctLen := rv.Len() + correctType := reflect.ArrayOf(correctLen, reflect.TypeOf(zero)) + cpy := reflect.New(correctType) + _ = reflect.Copy(cpy.Elem(), rv) + return cpy.Elem().Slice(0, correctLen).Interface().([]T) +} diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go index 3c2784eea33e7..4469700d9cbb1 100644 --- a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go +++ b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go @@ -39,8 +39,7 @@ type baggageState struct { // Passing nil SetHookFunc creates a context with no set hook to call. func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Context { var s baggageState - switch v := parent.Value(baggageKey).(type) { - case baggageState: + if v, ok := parent.Value(baggageKey).(baggageState); ok { s = v } @@ -54,8 +53,7 @@ func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Contex // Passing nil GetHookFunc creates a context with no get hook to call. func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Context { var s baggageState - switch v := parent.Value(baggageKey).(type) { - case baggageState: + if v, ok := parent.Value(baggageKey).(baggageState); ok { s = v } @@ -67,8 +65,7 @@ func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Contex // returns a context without any baggage. func ContextWithList(parent context.Context, list List) context.Context { var s baggageState - switch v := parent.Value(baggageKey).(type) { - case baggageState: + if v, ok := parent.Value(baggageKey).(baggageState); ok { s = v } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go index 0a378476b0fd2..293c08961fbcd 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go @@ -17,7 +17,8 @@ package global // import "go.opentelemetry.io/otel/internal/global" import ( "log" "os" - "sync" + "sync/atomic" + "unsafe" "github.com/go-logr/logr" "github.com/go-logr/stdr" @@ -27,37 +28,36 @@ import ( // // The default logger uses stdr which is backed by the standard `log.Logger` // interface. This logger will only show messages at the Error Level. -var globalLogger logr.Logger = stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)) -var globalLoggerLock = &sync.RWMutex{} +var globalLogger unsafe.Pointer + +func init() { + SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) +} // SetLogger overrides the globalLogger with l. // // To see Info messages use a logger with `l.V(1).Enabled() == true` -// To see Debug messages use a logger with `l.V(5).Enabled() == true` +// To see Debug messages use a logger with `l.V(5).Enabled() == true`. 
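Reviewer note: as the doc comment above says, Info maps to V(1) and Debug to V(5), so whether those records appear depends entirely on the verbosity of the installed logr.Logger. A minimal sketch, assuming the public otel.SetLogger wrapper around this internal setter, that raises stdr's verbosity so both levels are emitted:

package main

import (
	"log"
	"os"

	"github.com/go-logr/stdr"
	"go.opentelemetry.io/otel"
)

func main() {
	// stdr drops records whose level exceeds its verbosity; 5 lets both the
	// Info (V(1)) and Debug (V(5)) messages through.
	stdr.SetVerbosity(5)
	otel.SetLogger(stdr.New(log.New(os.Stderr, "otel: ", log.LstdFlags)))
}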
func SetLogger(l logr.Logger) { - globalLoggerLock.Lock() - defer globalLoggerLock.Unlock() - globalLogger = l + atomic.StorePointer(&globalLogger, unsafe.Pointer(&l)) +} + +func getLogger() logr.Logger { + return *(*logr.Logger)(atomic.LoadPointer(&globalLogger)) } // Info prints messages about the general state of the API or SDK. -// This should usually be less then 5 messages a minute +// This should usually be less then 5 messages a minute. func Info(msg string, keysAndValues ...interface{}) { - globalLoggerLock.RLock() - defer globalLoggerLock.RUnlock() - globalLogger.V(1).Info(msg, keysAndValues...) + getLogger().V(1).Info(msg, keysAndValues...) } // Error prints messages about exceptional states of the API or SDK. func Error(err error, msg string, keysAndValues ...interface{}) { - globalLoggerLock.RLock() - defer globalLoggerLock.RUnlock() - globalLogger.Error(err, msg, keysAndValues...) + getLogger().Error(err, msg, keysAndValues...) } // Debug prints messages about all internal changes in the API or SDK. func Debug(msg string, keysAndValues ...interface{}) { - globalLoggerLock.RLock() - defer globalLoggerLock.RUnlock() - globalLogger.V(5).Info(msg, keysAndValues...) + getLogger().V(5).Info(msg, keysAndValues...) } diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go index d6b3e900cd451..1ad38f828ecd7 100644 --- a/vendor/go.opentelemetry.io/otel/internal/global/state.go +++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go @@ -15,6 +15,7 @@ package global // import "go.opentelemetry.io/otel/internal/global" import ( + "errors" "sync" "sync/atomic" @@ -47,17 +48,24 @@ func TracerProvider() trace.TracerProvider { // SetTracerProvider is the internal implementation for global.SetTracerProvider. func SetTracerProvider(tp trace.TracerProvider) { + current := TracerProvider() + + if _, cOk := current.(*tracerProvider); cOk { + if _, tpOk := tp.(*tracerProvider); tpOk && current == tp { + // Do not assign the default delegating TracerProvider to delegate + // to itself. + Error( + errors.New("no delegate configured in tracer provider"), + "Setting tracer provider to it's current value. No delegate will be configured", + ) + return + } + } + delegateTraceOnce.Do(func() { - current := TracerProvider() - if current == tp { - // Setting the provider to the prior default is nonsense, panic. - // Panic is acceptable because we are likely still early in the - // process lifetime. - panic("invalid TracerProvider, the global instance cannot be reinstalled") - } else if def, ok := current.(*tracerProvider); ok { + if def, ok := current.(*tracerProvider); ok { def.setDelegate(tp) } - }) globalTracer.Store(tracerProviderHolder{tp: tp}) } @@ -69,15 +77,24 @@ func TextMapPropagator() propagation.TextMapPropagator { // SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator. func SetTextMapPropagator(p propagation.TextMapPropagator) { + current := TextMapPropagator() + + if _, cOk := current.(*textMapPropagator); cOk { + if _, pOk := p.(*textMapPropagator); pOk && current == p { + // Do not assign the default delegating TextMapPropagator to + // delegate to itself. + Error( + errors.New("no delegate configured in text map propagator"), + "Setting text map propagator to it's current value. No delegate will be configured", + ) + return + } + } + // For the textMapPropagator already returned by TextMapPropagator // delegate to p. 
delegateTextMapPropagatorOnce.Do(func() { - if current := TextMapPropagator(); current == p { - // Setting the provider to the prior default is nonsense, panic. - // Panic is acceptable because we are likely still early in the - // process lifetime. - panic("invalid TextMapPropagator, the global instance cannot be reinstalled") - } else if def, ok := current.(*textMapPropagator); ok { + if def, ok := current.(*textMapPropagator); ok { def.SetDelegate(p) } }) @@ -96,11 +113,3 @@ func defaultPropagatorsValue() *atomic.Value { v.Store(propagatorsHolder{tm: newTextMapPropagator()}) return v } - -// ResetForTest restores the initial global state, for testing purposes. -func ResetForTest() { - globalTracer = defaultTracerValue() - globalPropagators = defaultPropagatorsValue() - delegateTraceOnce = sync.Once{} - delegateTextMapPropagatorOnce = sync.Once{} -} diff --git a/vendor/go.opentelemetry.io/otel/internal/metric/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/metric/global/meter.go deleted file mode 100644 index 77781ead118cf..0000000000000 --- a/vendor/go.opentelemetry.io/otel/internal/metric/global/meter.go +++ /dev/null @@ -1,287 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package global // import "go.opentelemetry.io/otel/internal/metric/global" - -import ( - "context" - "sync" - "sync/atomic" - "unsafe" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/internal/metric/registry" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/number" - "go.opentelemetry.io/otel/metric/sdkapi" -) - -// This file contains the forwarding implementation of MeterProvider used as -// the default global instance. Metric events using instruments provided by -// this implementation are no-ops until the first Meter implementation is set -// as the global provider. -// -// The implementation here uses Mutexes to maintain a list of active Meters in -// the MeterProvider and Instruments in each Meter, under the assumption that -// these interfaces are not performance-critical. -// -// We have the invariant that setDelegate() will be called before a new -// MeterProvider implementation is registered as the global provider. Mutexes -// in the MeterProvider and Meters ensure that each instrument has a delegate -// before the global provider is set. -// -// Metric uniqueness checking is implemented by calling the exported -// methods of the api/metric/registry package. - -type meterKey struct { - InstrumentationName string - InstrumentationVersion string - SchemaURL string -} - -type meterProvider struct { - delegate metric.MeterProvider - - // lock protects `delegate` and `meters`. - lock sync.Mutex - - // meters maintains a unique entry for every named Meter - // that has been registered through the global instance. 
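Reviewer note on the state.go hunk above (before the removed metric forwarder that follows): re-installing the default delegating TracerProvider or TextMapPropagator no longer panics; it is rejected and an error is reported through the global logger. A sketch of the calls that used to panic and are now merely logged:

package main

import "go.opentelemetry.io/otel"

func main() {
	// Before any SDK is installed, GetTracerProvider returns the default
	// delegating provider. Passing it back to SetTracerProvider used to
	// panic; with the change above it logs an error and leaves the global
	// state untouched.
	otel.SetTracerProvider(otel.GetTracerProvider())

	// The same guard now applies to the propagator.
	otel.SetTextMapPropagator(otel.GetTextMapPropagator())
}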
- meters map[meterKey]*meterEntry -} - -type meterImpl struct { - delegate unsafe.Pointer // (*metric.MeterImpl) - - lock sync.Mutex - syncInsts []*syncImpl - asyncInsts []*asyncImpl -} - -type meterEntry struct { - unique sdkapi.MeterImpl - impl meterImpl -} - -type instrument struct { - descriptor sdkapi.Descriptor -} - -type syncImpl struct { - delegate unsafe.Pointer // (*sdkapi.SyncImpl) - - instrument -} - -type asyncImpl struct { - delegate unsafe.Pointer // (*sdkapi.AsyncImpl) - - instrument - - runner sdkapi.AsyncRunner -} - -var _ metric.MeterProvider = &meterProvider{} -var _ sdkapi.MeterImpl = &meterImpl{} -var _ sdkapi.InstrumentImpl = &syncImpl{} -var _ sdkapi.AsyncImpl = &asyncImpl{} - -func (inst *instrument) Descriptor() sdkapi.Descriptor { - return inst.descriptor -} - -// MeterProvider interface and delegation - -func newMeterProvider() *meterProvider { - return &meterProvider{ - meters: map[meterKey]*meterEntry{}, - } -} - -func (p *meterProvider) setDelegate(provider metric.MeterProvider) { - p.lock.Lock() - defer p.lock.Unlock() - - p.delegate = provider - for key, entry := range p.meters { - entry.impl.setDelegate(key, provider) - } - p.meters = nil -} - -func (p *meterProvider) Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter { - p.lock.Lock() - defer p.lock.Unlock() - - if p.delegate != nil { - return p.delegate.Meter(instrumentationName, opts...) - } - - cfg := metric.NewMeterConfig(opts...) - key := meterKey{ - InstrumentationName: instrumentationName, - InstrumentationVersion: cfg.InstrumentationVersion(), - SchemaURL: cfg.SchemaURL(), - } - entry, ok := p.meters[key] - if !ok { - entry = &meterEntry{} - // Note: This code implements its own MeterProvider - // name-uniqueness logic because there is - // synchronization required at the moment of - // delegation. We use the same instrument-uniqueness - // checking the real SDK uses here: - entry.unique = registry.NewUniqueInstrumentMeterImpl(&entry.impl) - p.meters[key] = entry - } - return metric.WrapMeterImpl(entry.unique) -} - -// Meter interface and delegation - -func (m *meterImpl) setDelegate(key meterKey, provider metric.MeterProvider) { - m.lock.Lock() - defer m.lock.Unlock() - - d := new(sdkapi.MeterImpl) - *d = provider.Meter( - key.InstrumentationName, - metric.WithInstrumentationVersion(key.InstrumentationVersion), - metric.WithSchemaURL(key.SchemaURL), - ).MeterImpl() - m.delegate = unsafe.Pointer(d) - - for _, inst := range m.syncInsts { - inst.setDelegate(*d) - } - m.syncInsts = nil - for _, obs := range m.asyncInsts { - obs.setDelegate(*d) - } - m.asyncInsts = nil -} - -func (m *meterImpl) NewSyncInstrument(desc sdkapi.Descriptor) (sdkapi.SyncImpl, error) { - m.lock.Lock() - defer m.lock.Unlock() - - if meterPtr := (*sdkapi.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil { - return (*meterPtr).NewSyncInstrument(desc) - } - - inst := &syncImpl{ - instrument: instrument{ - descriptor: desc, - }, - } - m.syncInsts = append(m.syncInsts, inst) - return inst, nil -} - -// Synchronous delegation - -func (inst *syncImpl) setDelegate(d sdkapi.MeterImpl) { - implPtr := new(sdkapi.SyncImpl) - - var err error - *implPtr, err = d.NewSyncInstrument(inst.descriptor) - - if err != nil { - // TODO: There is no standard way to deliver this error to the user. - // See https://github.com/open-telemetry/opentelemetry-go/issues/514 - // Note that the default SDK will not generate any errors yet, this is - // only for added safety. 
- panic(err) - } - - atomic.StorePointer(&inst.delegate, unsafe.Pointer(implPtr)) -} - -func (inst *syncImpl) Implementation() interface{} { - if implPtr := (*sdkapi.SyncImpl)(atomic.LoadPointer(&inst.delegate)); implPtr != nil { - return (*implPtr).Implementation() - } - return inst -} - -// Async delegation - -func (m *meterImpl) NewAsyncInstrument( - desc sdkapi.Descriptor, - runner sdkapi.AsyncRunner, -) (sdkapi.AsyncImpl, error) { - - m.lock.Lock() - defer m.lock.Unlock() - - if meterPtr := (*sdkapi.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil { - return (*meterPtr).NewAsyncInstrument(desc, runner) - } - - inst := &asyncImpl{ - instrument: instrument{ - descriptor: desc, - }, - runner: runner, - } - m.asyncInsts = append(m.asyncInsts, inst) - return inst, nil -} - -func (obs *asyncImpl) Implementation() interface{} { - if implPtr := (*sdkapi.AsyncImpl)(atomic.LoadPointer(&obs.delegate)); implPtr != nil { - return (*implPtr).Implementation() - } - return obs -} - -func (obs *asyncImpl) setDelegate(d sdkapi.MeterImpl) { - implPtr := new(sdkapi.AsyncImpl) - - var err error - *implPtr, err = d.NewAsyncInstrument(obs.descriptor, obs.runner) - - if err != nil { - // TODO: There is no standard way to deliver this error to the user. - // See https://github.com/open-telemetry/opentelemetry-go/issues/514 - // Note that the default SDK will not generate any errors yet, this is - // only for added safety. - panic(err) - } - - atomic.StorePointer(&obs.delegate, unsafe.Pointer(implPtr)) -} - -// Metric updates - -func (m *meterImpl) RecordBatch(ctx context.Context, labels []attribute.KeyValue, measurements ...sdkapi.Measurement) { - if delegatePtr := (*sdkapi.MeterImpl)(atomic.LoadPointer(&m.delegate)); delegatePtr != nil { - (*delegatePtr).RecordBatch(ctx, labels, measurements...) - } -} - -func (inst *syncImpl) RecordOne(ctx context.Context, number number.Number, labels []attribute.KeyValue) { - if instPtr := (*sdkapi.SyncImpl)(atomic.LoadPointer(&inst.delegate)); instPtr != nil { - (*instPtr).RecordOne(ctx, number, labels) - } -} - -func AtomicFieldOffsets() map[string]uintptr { - return map[string]uintptr{ - "meterProvider.delegate": unsafe.Offsetof(meterProvider{}.delegate), - "meterImpl.delegate": unsafe.Offsetof(meterImpl{}.delegate), - "syncImpl.delegate": unsafe.Offsetof(syncImpl{}.delegate), - "asyncImpl.delegate": unsafe.Offsetof(asyncImpl{}.delegate), - } -} diff --git a/vendor/go.opentelemetry.io/otel/internal/metric/registry/registry.go b/vendor/go.opentelemetry.io/otel/internal/metric/registry/registry.go deleted file mode 100644 index c929bf45c85ce..0000000000000 --- a/vendor/go.opentelemetry.io/otel/internal/metric/registry/registry.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package registry // import "go.opentelemetry.io/otel/internal/metric/registry" - -import ( - "context" - "fmt" - "sync" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/sdkapi" -) - -// UniqueInstrumentMeterImpl implements the metric.MeterImpl interface, adding -// uniqueness checking for instrument descriptors. -type UniqueInstrumentMeterImpl struct { - lock sync.Mutex - impl sdkapi.MeterImpl - state map[string]sdkapi.InstrumentImpl -} - -var _ sdkapi.MeterImpl = (*UniqueInstrumentMeterImpl)(nil) - -// ErrMetricKindMismatch is the standard error for mismatched metric -// instrument definitions. -var ErrMetricKindMismatch = fmt.Errorf( - "a metric was already registered by this name with another kind or number type") - -// NewUniqueInstrumentMeterImpl returns a wrapped metric.MeterImpl -// with the addition of instrument name uniqueness checking. -func NewUniqueInstrumentMeterImpl(impl sdkapi.MeterImpl) *UniqueInstrumentMeterImpl { - return &UniqueInstrumentMeterImpl{ - impl: impl, - state: map[string]sdkapi.InstrumentImpl{}, - } -} - -// MeterImpl gives the caller access to the underlying MeterImpl -// used by this UniqueInstrumentMeterImpl. -func (u *UniqueInstrumentMeterImpl) MeterImpl() sdkapi.MeterImpl { - return u.impl -} - -// RecordBatch implements sdkapi.MeterImpl. -func (u *UniqueInstrumentMeterImpl) RecordBatch(ctx context.Context, labels []attribute.KeyValue, ms ...sdkapi.Measurement) { - u.impl.RecordBatch(ctx, labels, ms...) -} - -// NewMetricKindMismatchError formats an error that describes a -// mismatched metric instrument definition. -func NewMetricKindMismatchError(desc sdkapi.Descriptor) error { - return fmt.Errorf("metric %s registered as %s %s: %w", - desc.Name(), - desc.NumberKind(), - desc.InstrumentKind(), - ErrMetricKindMismatch) -} - -// Compatible determines whether two sdkapi.Descriptors are considered -// the same for the purpose of uniqueness checking. -func Compatible(candidate, existing sdkapi.Descriptor) bool { - return candidate.InstrumentKind() == existing.InstrumentKind() && - candidate.NumberKind() == existing.NumberKind() -} - -// checkUniqueness returns an ErrMetricKindMismatch error if there is -// a conflict between a descriptor that was already registered and the -// `descriptor` argument. If there is an existing compatible -// registration, this returns the already-registered instrument. If -// there is no conflict and no prior registration, returns (nil, nil). -func (u *UniqueInstrumentMeterImpl) checkUniqueness(descriptor sdkapi.Descriptor) (sdkapi.InstrumentImpl, error) { - impl, ok := u.state[descriptor.Name()] - if !ok { - return nil, nil - } - - if !Compatible(descriptor, impl.Descriptor()) { - return nil, NewMetricKindMismatchError(impl.Descriptor()) - } - - return impl, nil -} - -// NewSyncInstrument implements sdkapi.MeterImpl. -func (u *UniqueInstrumentMeterImpl) NewSyncInstrument(descriptor sdkapi.Descriptor) (sdkapi.SyncImpl, error) { - u.lock.Lock() - defer u.lock.Unlock() - - impl, err := u.checkUniqueness(descriptor) - - if err != nil { - return nil, err - } else if impl != nil { - return impl.(sdkapi.SyncImpl), nil - } - - syncInst, err := u.impl.NewSyncInstrument(descriptor) - if err != nil { - return nil, err - } - u.state[descriptor.Name()] = syncInst - return syncInst, nil -} - -// NewAsyncInstrument implements sdkapi.MeterImpl. 
-func (u *UniqueInstrumentMeterImpl) NewAsyncInstrument( - descriptor sdkapi.Descriptor, - runner sdkapi.AsyncRunner, -) (sdkapi.AsyncImpl, error) { - u.lock.Lock() - defer u.lock.Unlock() - - impl, err := u.checkUniqueness(descriptor) - - if err != nil { - return nil, err - } else if impl != nil { - return impl.(sdkapi.AsyncImpl), nil - } - - asyncInst, err := u.impl.NewAsyncInstrument(descriptor, runner) - if err != nil { - return nil, err - } - u.state[descriptor.Name()] = asyncInst - return asyncInst, nil -} diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go index ce7afaa18805f..e07e7940004c2 100644 --- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -19,7 +19,7 @@ import ( "unsafe" ) -func BoolToRaw(b bool) uint64 { +func BoolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag. if b { return 1 } diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go index 3f722344fa7ce..621e4c5fcb833 100644 --- a/vendor/go.opentelemetry.io/otel/metric/config.go +++ b/vendor/go.opentelemetry.io/otel/metric/config.go @@ -14,65 +14,6 @@ package metric // import "go.opentelemetry.io/otel/metric" -import ( - "go.opentelemetry.io/otel/metric/unit" -) - -// InstrumentConfig contains options for metric instrument descriptors. -type InstrumentConfig struct { - description string - unit unit.Unit -} - -// Description describes the instrument in human-readable terms. -func (cfg *InstrumentConfig) Description() string { - return cfg.description -} - -// Unit describes the measurement unit for a instrument. -func (cfg *InstrumentConfig) Unit() unit.Unit { - return cfg.unit -} - -// InstrumentOption is an interface for applying metric instrument options. -type InstrumentOption interface { - // ApplyMeter is used to set a InstrumentOption value of a - // InstrumentConfig. - applyInstrument(InstrumentConfig) InstrumentConfig -} - -// NewInstrumentConfig creates a new InstrumentConfig -// and applies all the given options. -func NewInstrumentConfig(opts ...InstrumentOption) InstrumentConfig { - var config InstrumentConfig - for _, o := range opts { - config = o.applyInstrument(config) - } - return config -} - -type instrumentOptionFunc func(InstrumentConfig) InstrumentConfig - -func (fn instrumentOptionFunc) applyInstrument(cfg InstrumentConfig) InstrumentConfig { - return fn(cfg) -} - -// WithDescription applies provided description. -func WithDescription(desc string) InstrumentOption { - return instrumentOptionFunc(func(cfg InstrumentConfig) InstrumentConfig { - cfg.description = desc - return cfg - }) -} - -// WithUnit applies provided unit. -func WithUnit(unit unit.Unit) InstrumentOption { - return instrumentOptionFunc(func(cfg InstrumentConfig) InstrumentConfig { - cfg.unit = unit - return cfg - }) -} - // MeterConfig contains options for Meters. type MeterConfig struct { instrumentationVersion string @@ -80,18 +21,18 @@ type MeterConfig struct { } // InstrumentationVersion is the version of the library providing instrumentation. -func (cfg *MeterConfig) InstrumentationVersion() string { +func (cfg MeterConfig) InstrumentationVersion() string { return cfg.instrumentationVersion } // SchemaURL is the schema_url of the library providing instrumentation. 
-func (cfg *MeterConfig) SchemaURL() string { +func (cfg MeterConfig) SchemaURL() string { return cfg.schemaURL } // MeterOption is an interface for applying Meter options. type MeterOption interface { - // ApplyMeter is used to set a MeterOption value of a MeterConfig. + // applyMeter is used to set a MeterOption value of a MeterConfig. applyMeter(MeterConfig) MeterConfig } diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go index 4baf0719fcc10..bd6f43437208b 100644 --- a/vendor/go.opentelemetry.io/otel/metric/doc.go +++ b/vendor/go.opentelemetry.io/otel/metric/doc.go @@ -19,49 +19,5 @@ OpenTelemetry API. This package is currently in a pre-GA phase. Backwards incompatible changes may be introduced in subsequent minor version releases as we work to track the evolving OpenTelemetry specification and user feedback. - -Measurements can be made about an operation being performed or the state of a -system in general. These measurements can be crucial to the reliable operation -of code and provide valuable insights about the inner workings of a system. - -Measurements are made using instruments provided by this package. The type of -instrument used will depend on the type of measurement being made and of what -part of a system is being measured. - -Instruments are categorized as Synchronous or Asynchronous and independently -as Adding or Grouping. Synchronous instruments are called by the user with a -Context. Asynchronous instruments are called by the SDK during collection. -Adding instruments are semantically intended for capturing a sum. Grouping -instruments are intended for capturing a distribution. - -Adding instruments may be monotonic, in which case they are non-decreasing -and naturally define a rate. - -The synchronous instrument names are: - - Counter: adding, monotonic - UpDownCounter: adding - Histogram: grouping - -and the asynchronous instruments are: - - CounterObserver: adding, monotonic - UpDownCounterObserver: adding - GaugeObserver: grouping - -All instruments are provided with support for either float64 or int64 input -values. - -An instrument is created using a Meter. Additionally, a Meter is used to -record batches of synchronous measurements or asynchronous observations. A -Meter is obtained using a MeterProvider. A Meter, like a Tracer, is unique to -the instrumentation it instruments and must be named and versioned when -created with a MeterProvider with the name and version of the instrumentation -library. - -Instrumentation should be designed to accept a MeterProvider from which it can -create its own unique Meter. Alternatively, the registered global -MeterProvider from the go.opentelemetry.io/otel package can be used as a -default. */ package metric // import "go.opentelemetry.io/otel/metric" diff --git a/vendor/go.opentelemetry.io/otel/metric/global/global.go b/vendor/go.opentelemetry.io/otel/metric/global/global.go new file mode 100644 index 0000000000000..05a67c2e99952 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/global/global.go @@ -0,0 +1,42 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/metric/global" + +import ( + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/internal/global" +) + +// Meter returns a Meter from the global MeterProvider. The +// instrumentationName must be the name of the library providing +// instrumentation. This name may be the same as the instrumented code only if +// that code provides built-in instrumentation. If the instrumentationName is +// empty, then a implementation defined default name will be used instead. +// +// This is short for MeterProvider().Meter(name). +func Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter { + return MeterProvider().Meter(instrumentationName, opts...) +} + +// MeterProvider returns the registered global trace provider. +// If none is registered then a No-op MeterProvider is returned. +func MeterProvider() metric.MeterProvider { + return global.MeterProvider() +} + +// SetMeterProvider registers `mp` as the global meter provider. +func SetMeterProvider(mp metric.MeterProvider) { + global.SetMeterProvider(mp) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/global/metric.go b/vendor/go.opentelemetry.io/otel/metric/global/metric.go deleted file mode 100644 index 14ba862002a2d..0000000000000 --- a/vendor/go.opentelemetry.io/otel/metric/global/metric.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package global // import "go.opentelemetry.io/otel/metric/global" - -import ( - "go.opentelemetry.io/otel/internal/metric/global" - "go.opentelemetry.io/otel/metric" -) - -// Meter creates an implementation of the Meter interface from the global -// MeterProvider. The instrumentationName must be the name of the library -// providing instrumentation. This name may be the same as the instrumented -// code only if that code provides built-in instrumentation. If the -// instrumentationName is empty, then a implementation defined default name -// will be used instead. -// -// This is short for MeterProvider().Meter(name) -func Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter { - return GetMeterProvider().Meter(instrumentationName, opts...) -} - -// GetMeterProvider returns the registered global meter provider. If -// none is registered then a default meter provider is returned that -// forwards the Meter interface to the first registered Meter. -// -// Use the meter provider to create a named meter. E.g. 
-// meter := global.MeterProvider().Meter("example.com/foo") -// or -// meter := global.Meter("example.com/foo") -func GetMeterProvider() metric.MeterProvider { - return global.MeterProvider() -} - -// SetMeterProvider registers `mp` as the global meter provider. -func SetMeterProvider(mp metric.MeterProvider) { - global.SetMeterProvider(mp) -} diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go new file mode 100644 index 0000000000000..330a14c2f64a1 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncfloat64/asyncfloat64.go @@ -0,0 +1,80 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package asyncfloat64 // import "go.opentelemetry.io/otel/metric/instrument/asyncfloat64" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/instrument" +) + +// InstrumentProvider provides access to individual instruments. +// +// Warning: methods may be added to this interface in minor releases. +type InstrumentProvider interface { + // Counter creates an instrument for recording increasing values. + Counter(name string, opts ...instrument.Option) (Counter, error) + + // UpDownCounter creates an instrument for recording changes of a value. + UpDownCounter(name string, opts ...instrument.Option) (UpDownCounter, error) + + // Gauge creates an instrument for recording the current value. + Gauge(name string, opts ...instrument.Option) (Gauge, error) +} + +// Counter is an instrument that records increasing values. +// +// Warning: methods may be added to this interface in minor releases. +type Counter interface { + // Observe records the state of the instrument to be x. Implementations + // will assume x to be the cumulative sum of the count. + // + // It is only valid to call this within a callback. If called outside of the + // registered callback it should have no effect on the instrument, and an + // error will be reported via the error handler. + Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) + + instrument.Asynchronous +} + +// UpDownCounter is an instrument that records increasing or decreasing values. +// +// Warning: methods may be added to this interface in minor releases. +type UpDownCounter interface { + // Observe records the state of the instrument to be x. Implementations + // will assume x to be the cumulative sum of the count. + // + // It is only valid to call this within a callback. If called outside of the + // registered callback it should have no effect on the instrument, and an + // error will be reported via the error handler. + Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) + + instrument.Asynchronous +} + +// Gauge is an instrument that records independent readings. +// +// Warning: methods may be added to this interface in minor releases. 
+type Gauge interface { + // Observe records the state of the instrument to be x. + // + // It is only valid to call this within a callback. If called outside of the + // registered callback it should have no effect on the instrument, and an + // error will be reported via the error handler. + Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) + + instrument.Asynchronous +} diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go new file mode 100644 index 0000000000000..4fce9963c3b6f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/asyncint64/asyncint64.go @@ -0,0 +1,80 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package asyncint64 // import "go.opentelemetry.io/otel/metric/instrument/asyncint64" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/instrument" +) + +// InstrumentProvider provides access to individual instruments. +// +// Warning: methods may be added to this interface in minor releases. +type InstrumentProvider interface { + // Counter creates an instrument for recording increasing values. + Counter(name string, opts ...instrument.Option) (Counter, error) + + // UpDownCounter creates an instrument for recording changes of a value. + UpDownCounter(name string, opts ...instrument.Option) (UpDownCounter, error) + + // Gauge creates an instrument for recording the current value. + Gauge(name string, opts ...instrument.Option) (Gauge, error) +} + +// Counter is an instrument that records increasing values. +// +// Warning: methods may be added to this interface in minor releases. +type Counter interface { + // Observe records the state of the instrument to be x. Implementations + // will assume x to be the cumulative sum of the count. + // + // It is only valid to call this within a callback. If called outside of the + // registered callback it should have no effect on the instrument, and an + // error will be reported via the error handler. + Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) + + instrument.Asynchronous +} + +// UpDownCounter is an instrument that records increasing or decreasing values. +// +// Warning: methods may be added to this interface in minor releases. +type UpDownCounter interface { + // Observe records the state of the instrument to be x. Implementations + // will assume x to be the cumulative sum of the count. + // + // It is only valid to call this within a callback. If called outside of the + // registered callback it should have no effect on the instrument, and an + // error will be reported via the error handler. + Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) + + instrument.Asynchronous +} + +// Gauge is an instrument that records independent readings. +// +// Warning: methods may be added to this interface in minor releases. 
+type Gauge interface { + // Observe records the state of the instrument to be x. + // + // It is only valid to call this within a callback. If called outside of the + // registered callback it should have no effect on the instrument, and an + // error will be reported via the error handler. + Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) + + instrument.Asynchronous +} diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/config.go b/vendor/go.opentelemetry.io/otel/metric/instrument/config.go new file mode 100644 index 0000000000000..8778bce161970 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/config.go @@ -0,0 +1,69 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package instrument // import "go.opentelemetry.io/otel/metric/instrument" + +import "go.opentelemetry.io/otel/metric/unit" + +// Config contains options for metric instrument descriptors. +type Config struct { + description string + unit unit.Unit +} + +// Description describes the instrument in human-readable terms. +func (cfg Config) Description() string { + return cfg.description +} + +// Unit describes the measurement unit for an instrument. +func (cfg Config) Unit() unit.Unit { + return cfg.unit +} + +// Option is an interface for applying metric instrument options. +type Option interface { + applyInstrument(Config) Config +} + +// NewConfig creates a new Config and applies all the given options. +func NewConfig(opts ...Option) Config { + var config Config + for _, o := range opts { + config = o.applyInstrument(config) + } + return config +} + +type optionFunc func(Config) Config + +func (fn optionFunc) applyInstrument(cfg Config) Config { + return fn(cfg) +} + +// WithDescription applies provided description. +func WithDescription(desc string) Option { + return optionFunc(func(cfg Config) Config { + cfg.description = desc + return cfg + }) +} + +// WithUnit applies provided unit. +func WithUnit(u unit.Unit) Option { + return optionFunc(func(cfg Config) Config { + cfg.unit = u + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go new file mode 100644 index 0000000000000..e1bbb850d76d2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/instrument.go @@ -0,0 +1,30 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package instrument // import "go.opentelemetry.io/otel/metric/instrument" + +// Asynchronous instruments are instruments that are updated within a Callback. +// If an instrument is observed outside of it's callback it should be an error. +// +// This interface is used as a grouping mechanism. +type Asynchronous interface { + asynchronous() +} + +// Synchronous instruments are updated in line with application code. +// +// This interface is used as a grouping mechanism. +type Synchronous interface { + synchronous() +} diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go new file mode 100644 index 0000000000000..2ec192f70d879 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/syncfloat64/syncfloat64.go @@ -0,0 +1,64 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package syncfloat64 // import "go.opentelemetry.io/otel/metric/instrument/syncfloat64" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/instrument" +) + +// InstrumentProvider provides access to individual instruments. +// +// Warning: methods may be added to this interface in minor releases. +type InstrumentProvider interface { + // Counter creates an instrument for recording increasing values. + Counter(name string, opts ...instrument.Option) (Counter, error) + // UpDownCounter creates an instrument for recording changes of a value. + UpDownCounter(name string, opts ...instrument.Option) (UpDownCounter, error) + // Histogram creates an instrument for recording a distribution of values. + Histogram(name string, opts ...instrument.Option) (Histogram, error) +} + +// Counter is an instrument that records increasing values. +// +// Warning: methods may be added to this interface in minor releases. +type Counter interface { + // Add records a change to the counter. + Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) + + instrument.Synchronous +} + +// UpDownCounter is an instrument that records increasing or decreasing values. +// +// Warning: methods may be added to this interface in minor releases. +type UpDownCounter interface { + // Add records a change to the counter. + Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) + + instrument.Synchronous +} + +// Histogram is an instrument that records a distribution of values. +// +// Warning: methods may be added to this interface in minor releases. +type Histogram interface { + // Record adds an additional value to the distribution. 
+ Record(ctx context.Context, incr float64, attrs ...attribute.KeyValue) + + instrument.Synchronous +} diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go new file mode 100644 index 0000000000000..03b5d53e63688 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/instrument/syncint64/syncint64.go @@ -0,0 +1,64 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package syncint64 // import "go.opentelemetry.io/otel/metric/instrument/syncint64" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/instrument" +) + +// InstrumentProvider provides access to individual instruments. +// +// Warning: methods may be added to this interface in minor releases. +type InstrumentProvider interface { + // Counter creates an instrument for recording increasing values. + Counter(name string, opts ...instrument.Option) (Counter, error) + // UpDownCounter creates an instrument for recording changes of a value. + UpDownCounter(name string, opts ...instrument.Option) (UpDownCounter, error) + // Histogram creates an instrument for recording a distribution of values. + Histogram(name string, opts ...instrument.Option) (Histogram, error) +} + +// Counter is an instrument that records increasing values. +// +// Warning: methods may be added to this interface in minor releases. +type Counter interface { + // Add records a change to the counter. + Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) + + instrument.Synchronous +} + +// UpDownCounter is an instrument that records increasing or decreasing values. +// +// Warning: methods may be added to this interface in minor releases. +type UpDownCounter interface { + // Add records a change to the counter. + Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) + + instrument.Synchronous +} + +// Histogram is an instrument that records a distribution of values. +// +// Warning: methods may be added to this interface in minor releases. +type Histogram interface { + // Record adds an additional value to the distribution. + Record(ctx context.Context, incr int64, attrs ...attribute.KeyValue) + + instrument.Synchronous +} diff --git a/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go new file mode 100644 index 0000000000000..7cce2bd15f471 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/internal/global/instruments.go @@ -0,0 +1,360 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/metric/internal/global" + +import ( + "context" + "sync/atomic" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/instrument" + "go.opentelemetry.io/otel/metric/instrument/asyncfloat64" + "go.opentelemetry.io/otel/metric/instrument/asyncint64" + "go.opentelemetry.io/otel/metric/instrument/syncfloat64" + "go.opentelemetry.io/otel/metric/instrument/syncint64" +) + +type afCounter struct { + name string + opts []instrument.Option + + delegate atomic.Value //asyncfloat64.Counter + + instrument.Asynchronous +} + +func (i *afCounter) setDelegate(m metric.Meter) { + ctr, err := m.AsyncFloat64().Counter(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afCounter) Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(asyncfloat64.Counter).Observe(ctx, x, attrs...) + } +} + +func (i *afCounter) unwrap() instrument.Asynchronous { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(asyncfloat64.Counter) + } + return nil +} + +type afUpDownCounter struct { + name string + opts []instrument.Option + + delegate atomic.Value //asyncfloat64.UpDownCounter + + instrument.Asynchronous +} + +func (i *afUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.AsyncFloat64().UpDownCounter(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afUpDownCounter) Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(asyncfloat64.UpDownCounter).Observe(ctx, x, attrs...) + } +} + +func (i *afUpDownCounter) unwrap() instrument.Asynchronous { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(asyncfloat64.UpDownCounter) + } + return nil +} + +type afGauge struct { + name string + opts []instrument.Option + + delegate atomic.Value //asyncfloat64.Gauge + + instrument.Asynchronous +} + +func (i *afGauge) setDelegate(m metric.Meter) { + ctr, err := m.AsyncFloat64().Gauge(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *afGauge) Observe(ctx context.Context, x float64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(asyncfloat64.Gauge).Observe(ctx, x, attrs...) + } +} + +func (i *afGauge) unwrap() instrument.Asynchronous { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(asyncfloat64.Gauge) + } + return nil +} + +type aiCounter struct { + name string + opts []instrument.Option + + delegate atomic.Value //asyncint64.Counter + + instrument.Asynchronous +} + +func (i *aiCounter) setDelegate(m metric.Meter) { + ctr, err := m.AsyncInt64().Counter(i.name, i.opts...) 
+ if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiCounter) Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(asyncint64.Counter).Observe(ctx, x, attrs...) + } +} + +func (i *aiCounter) unwrap() instrument.Asynchronous { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(asyncint64.Counter) + } + return nil +} + +type aiUpDownCounter struct { + name string + opts []instrument.Option + + delegate atomic.Value //asyncint64.UpDownCounter + + instrument.Asynchronous +} + +func (i *aiUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.AsyncInt64().UpDownCounter(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiUpDownCounter) Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(asyncint64.UpDownCounter).Observe(ctx, x, attrs...) + } +} + +func (i *aiUpDownCounter) unwrap() instrument.Asynchronous { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(asyncint64.UpDownCounter) + } + return nil +} + +type aiGauge struct { + name string + opts []instrument.Option + + delegate atomic.Value //asyncint64.Gauge + + instrument.Asynchronous +} + +func (i *aiGauge) setDelegate(m metric.Meter) { + ctr, err := m.AsyncInt64().Gauge(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *aiGauge) Observe(ctx context.Context, x int64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(asyncint64.Gauge).Observe(ctx, x, attrs...) + } +} + +func (i *aiGauge) unwrap() instrument.Asynchronous { + if ctr := i.delegate.Load(); ctr != nil { + return ctr.(asyncint64.Gauge) + } + return nil +} + +// Sync Instruments. +type sfCounter struct { + name string + opts []instrument.Option + + delegate atomic.Value //syncfloat64.Counter + + instrument.Synchronous +} + +func (i *sfCounter) setDelegate(m metric.Meter) { + ctr, err := m.SyncFloat64().Counter(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfCounter) Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(syncfloat64.Counter).Add(ctx, incr, attrs...) + } +} + +type sfUpDownCounter struct { + name string + opts []instrument.Option + + delegate atomic.Value //syncfloat64.UpDownCounter + + instrument.Synchronous +} + +func (i *sfUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.SyncFloat64().UpDownCounter(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(syncfloat64.UpDownCounter).Add(ctx, incr, attrs...) + } +} + +type sfHistogram struct { + name string + opts []instrument.Option + + delegate atomic.Value //syncfloat64.Histogram + + instrument.Synchronous +} + +func (i *sfHistogram) setDelegate(m metric.Meter) { + ctr, err := m.SyncFloat64().Histogram(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *sfHistogram) Record(ctx context.Context, x float64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(syncfloat64.Histogram).Record(ctx, x, attrs...) 
+ } +} + +type siCounter struct { + name string + opts []instrument.Option + + delegate atomic.Value //syncint64.Counter + + instrument.Synchronous +} + +func (i *siCounter) setDelegate(m metric.Meter) { + ctr, err := m.SyncInt64().Counter(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siCounter) Add(ctx context.Context, x int64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(syncint64.Counter).Add(ctx, x, attrs...) + } +} + +type siUpDownCounter struct { + name string + opts []instrument.Option + + delegate atomic.Value //syncint64.UpDownCounter + + instrument.Synchronous +} + +func (i *siUpDownCounter) setDelegate(m metric.Meter) { + ctr, err := m.SyncInt64().UpDownCounter(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siUpDownCounter) Add(ctx context.Context, x int64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(syncint64.UpDownCounter).Add(ctx, x, attrs...) + } +} + +type siHistogram struct { + name string + opts []instrument.Option + + delegate atomic.Value //syncint64.Histogram + + instrument.Synchronous +} + +func (i *siHistogram) setDelegate(m metric.Meter) { + ctr, err := m.SyncInt64().Histogram(i.name, i.opts...) + if err != nil { + otel.Handle(err) + return + } + i.delegate.Store(ctr) +} + +func (i *siHistogram) Record(ctx context.Context, x int64, attrs ...attribute.KeyValue) { + if ctr := i.delegate.Load(); ctr != nil { + ctr.(syncint64.Histogram).Record(ctx, x, attrs...) + } +} diff --git a/vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go new file mode 100644 index 0000000000000..0fa924f397c86 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/internal/global/meter.go @@ -0,0 +1,347 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/metric/internal/global" + +import ( + "context" + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/instrument" + "go.opentelemetry.io/otel/metric/instrument/asyncfloat64" + "go.opentelemetry.io/otel/metric/instrument/asyncint64" + "go.opentelemetry.io/otel/metric/instrument/syncfloat64" + "go.opentelemetry.io/otel/metric/instrument/syncint64" +) + +// meterProvider is a placeholder for a configured SDK MeterProvider. +// +// All MeterProvider functionality is forwarded to a delegate once +// configured. +type meterProvider struct { + mtx sync.Mutex + meters map[il]*meter + + delegate metric.MeterProvider +} + +type il struct { + name string + version string +} + +// setDelegate configures p to delegate all MeterProvider functionality to +// provider. +// +// All Meters provided prior to this function call are switched out to be +// Meters provided by provider. 
All instruments and callbacks are recreated and +// delegated. +// +// It is guaranteed by the caller that this happens only once. +func (p *meterProvider) setDelegate(provider metric.MeterProvider) { + p.mtx.Lock() + defer p.mtx.Unlock() + + p.delegate = provider + + if len(p.meters) == 0 { + return + } + + for _, meter := range p.meters { + meter.setDelegate(provider) + } + + p.meters = nil +} + +// Meter implements MeterProvider. +func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter { + p.mtx.Lock() + defer p.mtx.Unlock() + + if p.delegate != nil { + return p.delegate.Meter(name, opts...) + } + + // At this moment it is guaranteed that no sdk is installed, save the meter in the meters map. + + c := metric.NewMeterConfig(opts...) + key := il{ + name: name, + version: c.InstrumentationVersion(), + } + + if p.meters == nil { + p.meters = make(map[il]*meter) + } + + if val, ok := p.meters[key]; ok { + return val + } + + t := &meter{name: name, opts: opts} + p.meters[key] = t + return t +} + +// meter is a placeholder for a metric.Meter. +// +// All Meter functionality is forwarded to a delegate once configured. +// Otherwise, all functionality is forwarded to a NoopMeter. +type meter struct { + name string + opts []metric.MeterOption + + mtx sync.Mutex + instruments []delegatedInstrument + callbacks []delegatedCallback + + delegate atomic.Value // metric.Meter +} + +type delegatedInstrument interface { + setDelegate(metric.Meter) +} + +// setDelegate configures m to delegate all Meter functionality to Meters +// created by provider. +// +// All subsequent calls to the Meter methods will be passed to the delegate. +// +// It is guaranteed by the caller that this happens only once. +func (m *meter) setDelegate(provider metric.MeterProvider) { + meter := provider.Meter(m.name, m.opts...) + m.delegate.Store(meter) + + m.mtx.Lock() + defer m.mtx.Unlock() + + for _, inst := range m.instruments { + inst.setDelegate(meter) + } + + for _, callback := range m.callbacks { + callback.setDelegate(meter) + } + + m.instruments = nil + m.callbacks = nil +} + +// AsyncInt64 is the namespace for the Asynchronous Integer instruments. +// +// To Observe data with instruments it must be registered in a callback. +func (m *meter) AsyncInt64() asyncint64.InstrumentProvider { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.AsyncInt64() + } + return (*aiInstProvider)(m) +} + +// AsyncFloat64 is the namespace for the Asynchronous Float instruments. +// +// To Observe data with instruments it must be registered in a callback. +func (m *meter) AsyncFloat64() asyncfloat64.InstrumentProvider { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.AsyncFloat64() + } + return (*afInstProvider)(m) +} + +// RegisterCallback captures the function that will be called during Collect. +// +// It is only valid to call Observe within the scope of the passed function, +// and only on the instruments that were registered with this call. 
+func (m *meter) RegisterCallback(insts []instrument.Asynchronous, function func(context.Context)) error { + if del, ok := m.delegate.Load().(metric.Meter); ok { + insts = unwrapInstruments(insts) + return del.RegisterCallback(insts, function) + } + + m.mtx.Lock() + defer m.mtx.Unlock() + m.callbacks = append(m.callbacks, delegatedCallback{ + instruments: insts, + function: function, + }) + + return nil +} + +type wrapped interface { + unwrap() instrument.Asynchronous +} + +func unwrapInstruments(instruments []instrument.Asynchronous) []instrument.Asynchronous { + out := make([]instrument.Asynchronous, 0, len(instruments)) + + for _, inst := range instruments { + if in, ok := inst.(wrapped); ok { + out = append(out, in.unwrap()) + } else { + out = append(out, inst) + } + } + + return out +} + +// SyncInt64 is the namespace for the Synchronous Integer instruments. +func (m *meter) SyncInt64() syncint64.InstrumentProvider { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.SyncInt64() + } + return (*siInstProvider)(m) +} + +// SyncFloat64 is the namespace for the Synchronous Float instruments. +func (m *meter) SyncFloat64() syncfloat64.InstrumentProvider { + if del, ok := m.delegate.Load().(metric.Meter); ok { + return del.SyncFloat64() + } + return (*sfInstProvider)(m) +} + +type delegatedCallback struct { + instruments []instrument.Asynchronous + function func(context.Context) +} + +func (c *delegatedCallback) setDelegate(m metric.Meter) { + insts := unwrapInstruments(c.instruments) + err := m.RegisterCallback(insts, c.function) + if err != nil { + otel.Handle(err) + } +} + +type afInstProvider meter + +// Counter creates an instrument for recording increasing values. +func (ip *afInstProvider) Counter(name string, opts ...instrument.Option) (asyncfloat64.Counter, error) { + ip.mtx.Lock() + defer ip.mtx.Unlock() + ctr := &afCounter{name: name, opts: opts} + ip.instruments = append(ip.instruments, ctr) + return ctr, nil +} + +// UpDownCounter creates an instrument for recording changes of a value. +func (ip *afInstProvider) UpDownCounter(name string, opts ...instrument.Option) (asyncfloat64.UpDownCounter, error) { + ip.mtx.Lock() + defer ip.mtx.Unlock() + ctr := &afUpDownCounter{name: name, opts: opts} + ip.instruments = append(ip.instruments, ctr) + return ctr, nil +} + +// Gauge creates an instrument for recording the current value. +func (ip *afInstProvider) Gauge(name string, opts ...instrument.Option) (asyncfloat64.Gauge, error) { + ip.mtx.Lock() + defer ip.mtx.Unlock() + ctr := &afGauge{name: name, opts: opts} + ip.instruments = append(ip.instruments, ctr) + return ctr, nil +} + +type aiInstProvider meter + +// Counter creates an instrument for recording increasing values. +func (ip *aiInstProvider) Counter(name string, opts ...instrument.Option) (asyncint64.Counter, error) { + ip.mtx.Lock() + defer ip.mtx.Unlock() + ctr := &aiCounter{name: name, opts: opts} + ip.instruments = append(ip.instruments, ctr) + return ctr, nil +} + +// UpDownCounter creates an instrument for recording changes of a value. +func (ip *aiInstProvider) UpDownCounter(name string, opts ...instrument.Option) (asyncint64.UpDownCounter, error) { + ip.mtx.Lock() + defer ip.mtx.Unlock() + ctr := &aiUpDownCounter{name: name, opts: opts} + ip.instruments = append(ip.instruments, ctr) + return ctr, nil +} + +// Gauge creates an instrument for recording the current value. 
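The delegating meter above buffers instrument creation and RegisterCallback registrations until an SDK MeterProvider is installed. As an illustrative usage sketch (not part of the diff; the function name and metric names are hypothetical), this is how a caller exercises the callback-based asynchronous API, driven here by the no-op meter so the snippet is self-contained:

```go
// Illustrative usage sketch; not part of the vendored diff.
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/instrument"
)

// observeQueueDepth (hypothetical) creates an asynchronous gauge and registers
// a callback that reports a value on every collection. With the delegating
// meter this is safe to call before any SDK MeterProvider is configured.
func observeQueueDepth(meter metric.Meter) error {
	depth, err := meter.AsyncInt64().Gauge("queue.depth")
	if err != nil {
		return err
	}
	return meter.RegisterCallback([]instrument.Asynchronous{depth}, func(ctx context.Context) {
		// A real application would read live state here instead of a constant.
		depth.Observe(ctx, 42, attribute.String("queue", "default"))
	})
}

func main() {
	// The no-op meter stands in for a real one so the sketch runs standalone.
	if err := observeQueueDepth(metric.NewNoopMeter()); err != nil {
		log.Fatal(err)
	}
}
```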
+func (ip *aiInstProvider) Gauge(name string, opts ...instrument.Option) (asyncint64.Gauge, error) { + ip.mtx.Lock() + defer ip.mtx.Unlock() + ctr := &aiGauge{name: name, opts: opts} + ip.instruments = append(ip.instruments, ctr) + return ctr, nil +} + +type sfInstProvider meter + +// Counter creates an instrument for recording increasing values. +func (ip *sfInstProvider) Counter(name string, opts ...instrument.Option) (syncfloat64.Counter, error) { + ip.mtx.Lock() + defer ip.mtx.Unlock() + ctr := &sfCounter{name: name, opts: opts} + ip.instruments = append(ip.instruments, ctr) + return ctr, nil +} + +// UpDownCounter creates an instrument for recording changes of a value. +func (ip *sfInstProvider) UpDownCounter(name string, opts ...instrument.Option) (syncfloat64.UpDownCounter, error) { + ip.mtx.Lock() + defer ip.mtx.Unlock() + ctr := &sfUpDownCounter{name: name, opts: opts} + ip.instruments = append(ip.instruments, ctr) + return ctr, nil +} + +// Histogram creates an instrument for recording a distribution of values. +func (ip *sfInstProvider) Histogram(name string, opts ...instrument.Option) (syncfloat64.Histogram, error) { + ip.mtx.Lock() + defer ip.mtx.Unlock() + ctr := &sfHistogram{name: name, opts: opts} + ip.instruments = append(ip.instruments, ctr) + return ctr, nil +} + +type siInstProvider meter + +// Counter creates an instrument for recording increasing values. +func (ip *siInstProvider) Counter(name string, opts ...instrument.Option) (syncint64.Counter, error) { + ip.mtx.Lock() + defer ip.mtx.Unlock() + ctr := &siCounter{name: name, opts: opts} + ip.instruments = append(ip.instruments, ctr) + return ctr, nil +} + +// UpDownCounter creates an instrument for recording changes of a value. +func (ip *siInstProvider) UpDownCounter(name string, opts ...instrument.Option) (syncint64.UpDownCounter, error) { + ip.mtx.Lock() + defer ip.mtx.Unlock() + ctr := &siUpDownCounter{name: name, opts: opts} + ip.instruments = append(ip.instruments, ctr) + return ctr, nil +} + +// Histogram creates an instrument for recording a distribution of values. +func (ip *siInstProvider) Histogram(name string, opts ...instrument.Option) (syncint64.Histogram, error) { + ip.mtx.Lock() + defer ip.mtx.Unlock() + ctr := &siHistogram{name: name, opts: opts} + ip.instruments = append(ip.instruments, ctr) + return ctr, nil +} diff --git a/vendor/go.opentelemetry.io/otel/internal/metric/global/metric.go b/vendor/go.opentelemetry.io/otel/metric/internal/global/state.go similarity index 54% rename from vendor/go.opentelemetry.io/otel/internal/metric/global/metric.go rename to vendor/go.opentelemetry.io/otel/metric/internal/global/state.go index 896c6dd1c30bb..47c0d787d8a6a 100644 --- a/vendor/go.opentelemetry.io/otel/internal/metric/global/metric.go +++ b/vendor/go.opentelemetry.io/otel/metric/internal/global/state.go @@ -4,7 +4,7 @@ // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -12,55 +12,57 @@ // See the License for the specific language governing permissions and // limitations under the License.
-package global // import "go.opentelemetry.io/otel/internal/metric/global" +package global // import "go.opentelemetry.io/otel/metric/internal/global" import ( + "errors" "sync" "sync/atomic" + "go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/metric" ) -type meterProviderHolder struct { - mp metric.MeterProvider -} - var ( - globalMeter = defaultMeterValue() + globalMeterProvider = defaultMeterProvider() delegateMeterOnce sync.Once ) +type meterProviderHolder struct { + mp metric.MeterProvider +} + // MeterProvider is the internal implementation for global.MeterProvider. func MeterProvider() metric.MeterProvider { - return globalMeter.Load().(meterProviderHolder).mp + return globalMeterProvider.Load().(meterProviderHolder).mp } // SetMeterProvider is the internal implementation for global.SetMeterProvider. func SetMeterProvider(mp metric.MeterProvider) { - delegateMeterOnce.Do(func() { - current := MeterProvider() + current := MeterProvider() + if _, cOk := current.(*meterProvider); cOk { + if _, mpOk := mp.(*meterProvider); mpOk && current == mp { + // Do not assign the default delegating MeterProvider to delegate + // to itself. + global.Error( + errors.New("no delegate configured in meter provider"), + "Setting meter provider to it's current value. No delegate will be configured", + ) + return + } + } - if current == mp { - // Setting the provider to the prior default is nonsense, panic. - // Panic is acceptable because we are likely still early in the - // process lifetime. - panic("invalid MeterProvider, the global instance cannot be reinstalled") - } else if def, ok := current.(*meterProvider); ok { + delegateMeterOnce.Do(func() { + if def, ok := current.(*meterProvider); ok { def.setDelegate(mp) } }) - globalMeter.Store(meterProviderHolder{mp: mp}) + globalMeterProvider.Store(meterProviderHolder{mp: mp}) } -func defaultMeterValue() *atomic.Value { +func defaultMeterProvider() *atomic.Value { v := &atomic.Value{} - v.Store(meterProviderHolder{mp: newMeterProvider()}) + v.Store(meterProviderHolder{mp: &meterProvider{}}) return v } - -// ResetForTest restores the initial global state, for testing purposes. -func ResetForTest() { - globalMeter = defaultMeterValue() - delegateMeterOnce = sync.Once{} -} diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go new file mode 100644 index 0000000000000..23e6853afbb63 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/meter.go @@ -0,0 +1,64 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
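The SetMeterProvider logic above stores the active provider in an atomic.Value holder and, exactly once, hands the default delegating placeholder over to the first real provider. A minimal, self-contained sketch of that holder-plus-sync.Once pattern follows; it uses a toy Provider interface rather than the otel types, and all names here are illustrative:

```go
// Toy sketch of the holder + sync.Once delegation pattern; names are illustrative.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type Provider interface{ Name() string }

// delegating is the placeholder returned before a real Provider is installed.
type delegating struct{ delegate Provider }

func (d *delegating) Name() string {
	if d.delegate != nil {
		return d.delegate.Name()
	}
	return "placeholder"
}

type sdkProvider struct{}

func (sdkProvider) Name() string { return "sdk" }

// holder keeps the stored concrete type constant, which atomic.Value requires.
type holder struct{ p Provider }

var (
	globalProvider atomic.Value // always stores a holder
	delegateOnce   sync.Once
)

func init() { globalProvider.Store(holder{p: &delegating{}}) }

func Current() Provider { return globalProvider.Load().(holder).p }

// Set installs p and, exactly once, points the placeholder at it so values
// handed out earlier start forwarding to the real Provider.
func Set(p Provider) {
	delegateOnce.Do(func() {
		if d, ok := Current().(*delegating); ok {
			d.delegate = p
		}
	})
	globalProvider.Store(holder{p: p})
}

func main() {
	early := Current() // obtained before any real Provider exists
	Set(sdkProvider{})
	fmt.Println(early.Name(), Current().Name()) // prints: sdk sdk
}
```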
+ +package metric // import "go.opentelemetry.io/otel/metric" + +import ( + "context" + + "go.opentelemetry.io/otel/metric/instrument" + "go.opentelemetry.io/otel/metric/instrument/asyncfloat64" + "go.opentelemetry.io/otel/metric/instrument/asyncint64" + "go.opentelemetry.io/otel/metric/instrument/syncfloat64" + "go.opentelemetry.io/otel/metric/instrument/syncint64" +) + +// MeterProvider provides access to named Meter instances, for instrumenting +// an application or library. +// +// Warning: methods may be added to this interface in minor releases. +type MeterProvider interface { + // Meter creates an instance of a `Meter` interface. The instrumentationName + // must be the name of the library providing instrumentation. This name may + // be the same as the instrumented code only if that code provides built-in + // instrumentation. If the instrumentationName is empty, then a + // implementation defined default name will be used instead. + Meter(instrumentationName string, opts ...MeterOption) Meter +} + +// Meter provides access to instrument instances for recording metrics. +// +// Warning: methods may be added to this interface in minor releases. +type Meter interface { + // AsyncInt64 is the namespace for the Asynchronous Integer instruments. + // + // To Observe data with instruments it must be registered in a callback. + AsyncInt64() asyncint64.InstrumentProvider + + // AsyncFloat64 is the namespace for the Asynchronous Float instruments + // + // To Observe data with instruments it must be registered in a callback. + AsyncFloat64() asyncfloat64.InstrumentProvider + + // RegisterCallback captures the function that will be called during Collect. + // + // It is only valid to call Observe within the scope of the passed function, + // and only on the instruments that were registered with this call. + RegisterCallback(insts []instrument.Asynchronous, function func(context.Context)) error + + // SyncInt64 is the namespace for the Synchronous Integer instruments + SyncInt64() syncint64.InstrumentProvider + // SyncFloat64 is the namespace for the Synchronous Float instruments + SyncFloat64() syncfloat64.InstrumentProvider +} diff --git a/vendor/go.opentelemetry.io/otel/metric/metric.go b/vendor/go.opentelemetry.io/otel/metric/metric.go deleted file mode 100644 index d8c5a6b3f353f..0000000000000 --- a/vendor/go.opentelemetry.io/otel/metric/metric.go +++ /dev/null @@ -1,538 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metric // import "go.opentelemetry.io/otel/metric" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/number" - "go.opentelemetry.io/otel/metric/sdkapi" -) - -// MeterProvider supports named Meter instances. -type MeterProvider interface { - // Meter creates an implementation of the Meter interface. - // The instrumentationName must be the name of the library providing - // instrumentation. 
This name may be the same as the instrumented code - // only if that code provides built-in instrumentation. If the - // instrumentationName is empty, then a implementation defined default - // name will be used instead. - Meter(instrumentationName string, opts ...MeterOption) Meter -} - -// Meter is the creator of metric instruments. -// -// An uninitialized Meter is a no-op implementation. -type Meter struct { - impl sdkapi.MeterImpl -} - -// WrapMeterImpl constructs a `Meter` implementation from a -// `MeterImpl` implementation. -func WrapMeterImpl(impl sdkapi.MeterImpl) Meter { - return Meter{ - impl: impl, - } -} - -// Measurement is used for reporting a synchronous batch of metric -// values. Instances of this type should be created by synchronous -// instruments (e.g., Int64Counter.Measurement()). -// -// Note: This is an alias because it is a first-class member of the -// API but is also part of the lower-level sdkapi interface. -type Measurement = sdkapi.Measurement - -// Observation is used for reporting an asynchronous batch of metric -// values. Instances of this type should be created by asynchronous -// instruments (e.g., Int64GaugeObserver.Observation()). -// -// Note: This is an alias because it is a first-class member of the -// API but is also part of the lower-level sdkapi interface. -type Observation = sdkapi.Observation - -// RecordBatch atomically records a batch of measurements. -func (m Meter) RecordBatch(ctx context.Context, ls []attribute.KeyValue, ms ...Measurement) { - if m.impl == nil { - return - } - m.impl.RecordBatch(ctx, ls, ms...) -} - -// NewBatchObserver creates a new BatchObserver that supports -// making batches of observations for multiple instruments. -func (m Meter) NewBatchObserver(callback BatchObserverFunc) BatchObserver { - return BatchObserver{ - meter: m, - runner: newBatchAsyncRunner(callback), - } -} - -// NewInt64Counter creates a new integer Counter instrument with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). -func (m Meter) NewInt64Counter(name string, options ...InstrumentOption) (Int64Counter, error) { - return wrapInt64CounterInstrument( - m.newSync(name, sdkapi.CounterInstrumentKind, number.Int64Kind, options)) -} - -// NewFloat64Counter creates a new floating point Counter with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). -func (m Meter) NewFloat64Counter(name string, options ...InstrumentOption) (Float64Counter, error) { - return wrapFloat64CounterInstrument( - m.newSync(name, sdkapi.CounterInstrumentKind, number.Float64Kind, options)) -} - -// NewInt64UpDownCounter creates a new integer UpDownCounter instrument with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). -func (m Meter) NewInt64UpDownCounter(name string, options ...InstrumentOption) (Int64UpDownCounter, error) { - return wrapInt64UpDownCounterInstrument( - m.newSync(name, sdkapi.UpDownCounterInstrumentKind, number.Int64Kind, options)) -} - -// NewFloat64UpDownCounter creates a new floating point UpDownCounter with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). 
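For readers tracking this vendor bump, the constructor-style API being deleted here (meter.NewInt64Counter and friends) is replaced by typed instrument providers on the new Meter interface. A hedged before/after sketch, with a hypothetical metric name and the no-op meter standing in for a real one:

```go
// Hedged migration sketch; metric name and setup are hypothetical, and only
// the "after" form compiles against the metric API vendored in this bump.
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := metric.NewNoopMeter() // stand-in for a Meter from a real provider

	// Before this bump (removed):
	//   counter, err := meter.NewInt64Counter("requests.total")
	//
	// After: instruments come from typed providers on the Meter.
	counter, err := meter.SyncInt64().Counter("requests.total")
	if err != nil {
		log.Fatal(err)
	}
	counter.Add(context.Background(), 1)
}
```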
-func (m Meter) NewFloat64UpDownCounter(name string, options ...InstrumentOption) (Float64UpDownCounter, error) { - return wrapFloat64UpDownCounterInstrument( - m.newSync(name, sdkapi.UpDownCounterInstrumentKind, number.Float64Kind, options)) -} - -// NewInt64Histogram creates a new integer Histogram instrument with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). -func (m Meter) NewInt64Histogram(name string, opts ...InstrumentOption) (Int64Histogram, error) { - return wrapInt64HistogramInstrument( - m.newSync(name, sdkapi.HistogramInstrumentKind, number.Int64Kind, opts)) -} - -// NewFloat64Histogram creates a new floating point Histogram with the -// given name, customized with options. May return an error if the -// name is invalid (e.g., empty) or improperly registered (e.g., -// duplicate registration). -func (m Meter) NewFloat64Histogram(name string, opts ...InstrumentOption) (Float64Histogram, error) { - return wrapFloat64HistogramInstrument( - m.newSync(name, sdkapi.HistogramInstrumentKind, number.Float64Kind, opts)) -} - -// NewInt64GaugeObserver creates a new integer GaugeObserver instrument -// with the given name, running a given callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (m Meter) NewInt64GaugeObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64GaugeObserver, error) { - if callback == nil { - return wrapInt64GaugeObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapInt64GaugeObserverInstrument( - m.newAsync(name, sdkapi.GaugeObserverInstrumentKind, number.Int64Kind, opts, - newInt64AsyncRunner(callback))) -} - -// NewFloat64GaugeObserver creates a new floating point GaugeObserver with -// the given name, running a given callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (m Meter) NewFloat64GaugeObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64GaugeObserver, error) { - if callback == nil { - return wrapFloat64GaugeObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapFloat64GaugeObserverInstrument( - m.newAsync(name, sdkapi.GaugeObserverInstrumentKind, number.Float64Kind, opts, - newFloat64AsyncRunner(callback))) -} - -// NewInt64CounterObserver creates a new integer CounterObserver instrument -// with the given name, running a given callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (m Meter) NewInt64CounterObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64CounterObserver, error) { - if callback == nil { - return wrapInt64CounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapInt64CounterObserverInstrument( - m.newAsync(name, sdkapi.CounterObserverInstrumentKind, number.Int64Kind, opts, - newInt64AsyncRunner(callback))) -} - -// NewFloat64CounterObserver creates a new floating point CounterObserver with -// the given name, running a given callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). 
-func (m Meter) NewFloat64CounterObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64CounterObserver, error) { - if callback == nil { - return wrapFloat64CounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapFloat64CounterObserverInstrument( - m.newAsync(name, sdkapi.CounterObserverInstrumentKind, number.Float64Kind, opts, - newFloat64AsyncRunner(callback))) -} - -// NewInt64UpDownCounterObserver creates a new integer UpDownCounterObserver instrument -// with the given name, running a given callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (m Meter) NewInt64UpDownCounterObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64UpDownCounterObserver, error) { - if callback == nil { - return wrapInt64UpDownCounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapInt64UpDownCounterObserverInstrument( - m.newAsync(name, sdkapi.UpDownCounterObserverInstrumentKind, number.Int64Kind, opts, - newInt64AsyncRunner(callback))) -} - -// NewFloat64UpDownCounterObserver creates a new floating point UpDownCounterObserver with -// the given name, running a given callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (m Meter) NewFloat64UpDownCounterObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64UpDownCounterObserver, error) { - if callback == nil { - return wrapFloat64UpDownCounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapFloat64UpDownCounterObserverInstrument( - m.newAsync(name, sdkapi.UpDownCounterObserverInstrumentKind, number.Float64Kind, opts, - newFloat64AsyncRunner(callback))) -} - -// NewInt64GaugeObserver creates a new integer GaugeObserver instrument -// with the given name, running in a batch callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (b BatchObserver) NewInt64GaugeObserver(name string, opts ...InstrumentOption) (Int64GaugeObserver, error) { - if b.runner == nil { - return wrapInt64GaugeObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapInt64GaugeObserverInstrument( - b.meter.newAsync(name, sdkapi.GaugeObserverInstrumentKind, number.Int64Kind, opts, b.runner)) -} - -// NewFloat64GaugeObserver creates a new floating point GaugeObserver with -// the given name, running in a batch callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (b BatchObserver) NewFloat64GaugeObserver(name string, opts ...InstrumentOption) (Float64GaugeObserver, error) { - if b.runner == nil { - return wrapFloat64GaugeObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapFloat64GaugeObserverInstrument( - b.meter.newAsync(name, sdkapi.GaugeObserverInstrumentKind, number.Float64Kind, opts, - b.runner)) -} - -// NewInt64CounterObserver creates a new integer CounterObserver instrument -// with the given name, running in a batch callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). 
-func (b BatchObserver) NewInt64CounterObserver(name string, opts ...InstrumentOption) (Int64CounterObserver, error) { - if b.runner == nil { - return wrapInt64CounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapInt64CounterObserverInstrument( - b.meter.newAsync(name, sdkapi.CounterObserverInstrumentKind, number.Int64Kind, opts, b.runner)) -} - -// NewFloat64CounterObserver creates a new floating point CounterObserver with -// the given name, running in a batch callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (b BatchObserver) NewFloat64CounterObserver(name string, opts ...InstrumentOption) (Float64CounterObserver, error) { - if b.runner == nil { - return wrapFloat64CounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapFloat64CounterObserverInstrument( - b.meter.newAsync(name, sdkapi.CounterObserverInstrumentKind, number.Float64Kind, opts, - b.runner)) -} - -// NewInt64UpDownCounterObserver creates a new integer UpDownCounterObserver instrument -// with the given name, running in a batch callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (b BatchObserver) NewInt64UpDownCounterObserver(name string, opts ...InstrumentOption) (Int64UpDownCounterObserver, error) { - if b.runner == nil { - return wrapInt64UpDownCounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapInt64UpDownCounterObserverInstrument( - b.meter.newAsync(name, sdkapi.UpDownCounterObserverInstrumentKind, number.Int64Kind, opts, b.runner)) -} - -// NewFloat64UpDownCounterObserver creates a new floating point UpDownCounterObserver with -// the given name, running in a batch callback, and customized with -// options. May return an error if the name is invalid (e.g., empty) -// or improperly registered (e.g., duplicate registration). -func (b BatchObserver) NewFloat64UpDownCounterObserver(name string, opts ...InstrumentOption) (Float64UpDownCounterObserver, error) { - if b.runner == nil { - return wrapFloat64UpDownCounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) - } - return wrapFloat64UpDownCounterObserverInstrument( - b.meter.newAsync(name, sdkapi.UpDownCounterObserverInstrumentKind, number.Float64Kind, opts, - b.runner)) -} - -// MeterImpl returns the underlying MeterImpl of this Meter. -func (m Meter) MeterImpl() sdkapi.MeterImpl { - return m.impl -} - -// newAsync constructs one new asynchronous instrument. -func (m Meter) newAsync( - name string, - mkind sdkapi.InstrumentKind, - nkind number.Kind, - opts []InstrumentOption, - runner sdkapi.AsyncRunner, -) ( - sdkapi.AsyncImpl, - error, -) { - if m.impl == nil { - return sdkapi.NewNoopAsyncInstrument(), nil - } - cfg := NewInstrumentConfig(opts...) - desc := sdkapi.NewDescriptor(name, mkind, nkind, cfg.description, cfg.unit) - return m.impl.NewAsyncInstrument(desc, runner) -} - -// newSync constructs one new synchronous instrument. -func (m Meter) newSync( - name string, - metricKind sdkapi.InstrumentKind, - numberKind number.Kind, - opts []InstrumentOption, -) ( - sdkapi.SyncImpl, - error, -) { - if m.impl == nil { - return sdkapi.NewNoopSyncInstrument(), nil - } - cfg := NewInstrumentConfig(opts...) 
- desc := sdkapi.NewDescriptor(name, metricKind, numberKind, cfg.description, cfg.unit) - return m.impl.NewSyncInstrument(desc) -} - -// MeterMust is a wrapper for Meter interfaces that panics when any -// instrument constructor encounters an error. -type MeterMust struct { - meter Meter -} - -// BatchObserverMust is a wrapper for BatchObserver that panics when -// any instrument constructor encounters an error. -type BatchObserverMust struct { - batch BatchObserver -} - -// Must constructs a MeterMust implementation from a Meter, allowing -// the application to panic when any instrument constructor yields an -// error. -func Must(meter Meter) MeterMust { - return MeterMust{meter: meter} -} - -// NewInt64Counter calls `Meter.NewInt64Counter` and returns the -// instrument, panicking if it encounters an error. -func (mm MeterMust) NewInt64Counter(name string, cos ...InstrumentOption) Int64Counter { - if inst, err := mm.meter.NewInt64Counter(name, cos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64Counter calls `Meter.NewFloat64Counter` and returns the -// instrument, panicking if it encounters an error. -func (mm MeterMust) NewFloat64Counter(name string, cos ...InstrumentOption) Float64Counter { - if inst, err := mm.meter.NewFloat64Counter(name, cos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64UpDownCounter calls `Meter.NewInt64UpDownCounter` and returns the -// instrument, panicking if it encounters an error. -func (mm MeterMust) NewInt64UpDownCounter(name string, cos ...InstrumentOption) Int64UpDownCounter { - if inst, err := mm.meter.NewInt64UpDownCounter(name, cos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64UpDownCounter calls `Meter.NewFloat64UpDownCounter` and returns the -// instrument, panicking if it encounters an error. -func (mm MeterMust) NewFloat64UpDownCounter(name string, cos ...InstrumentOption) Float64UpDownCounter { - if inst, err := mm.meter.NewFloat64UpDownCounter(name, cos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64Histogram calls `Meter.NewInt64Histogram` and returns the -// instrument, panicking if it encounters an error. -func (mm MeterMust) NewInt64Histogram(name string, mos ...InstrumentOption) Int64Histogram { - if inst, err := mm.meter.NewInt64Histogram(name, mos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64Histogram calls `Meter.NewFloat64Histogram` and returns the -// instrument, panicking if it encounters an error. -func (mm MeterMust) NewFloat64Histogram(name string, mos ...InstrumentOption) Float64Histogram { - if inst, err := mm.meter.NewFloat64Histogram(name, mos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64GaugeObserver calls `Meter.NewInt64GaugeObserver` and -// returns the instrument, panicking if it encounters an error. -func (mm MeterMust) NewInt64GaugeObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64GaugeObserver { - if inst, err := mm.meter.NewInt64GaugeObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64GaugeObserver calls `Meter.NewFloat64GaugeObserver` and -// returns the instrument, panicking if it encounters an error. 
-func (mm MeterMust) NewFloat64GaugeObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64GaugeObserver { - if inst, err := mm.meter.NewFloat64GaugeObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64CounterObserver calls `Meter.NewInt64CounterObserver` and -// returns the instrument, panicking if it encounters an error. -func (mm MeterMust) NewInt64CounterObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64CounterObserver { - if inst, err := mm.meter.NewInt64CounterObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64CounterObserver calls `Meter.NewFloat64CounterObserver` and -// returns the instrument, panicking if it encounters an error. -func (mm MeterMust) NewFloat64CounterObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64CounterObserver { - if inst, err := mm.meter.NewFloat64CounterObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64UpDownCounterObserver calls `Meter.NewInt64UpDownCounterObserver` and -// returns the instrument, panicking if it encounters an error. -func (mm MeterMust) NewInt64UpDownCounterObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64UpDownCounterObserver { - if inst, err := mm.meter.NewInt64UpDownCounterObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64UpDownCounterObserver calls `Meter.NewFloat64UpDownCounterObserver` and -// returns the instrument, panicking if it encounters an error. -func (mm MeterMust) NewFloat64UpDownCounterObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64UpDownCounterObserver { - if inst, err := mm.meter.NewFloat64UpDownCounterObserver(name, callback, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewBatchObserver returns a wrapper around BatchObserver that panics -// when any instrument constructor returns an error. -func (mm MeterMust) NewBatchObserver(callback BatchObserverFunc) BatchObserverMust { - return BatchObserverMust{ - batch: mm.meter.NewBatchObserver(callback), - } -} - -// NewInt64GaugeObserver calls `BatchObserver.NewInt64GaugeObserver` and -// returns the instrument, panicking if it encounters an error. -func (bm BatchObserverMust) NewInt64GaugeObserver(name string, oos ...InstrumentOption) Int64GaugeObserver { - if inst, err := bm.batch.NewInt64GaugeObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64GaugeObserver calls `BatchObserver.NewFloat64GaugeObserver` and -// returns the instrument, panicking if it encounters an error. -func (bm BatchObserverMust) NewFloat64GaugeObserver(name string, oos ...InstrumentOption) Float64GaugeObserver { - if inst, err := bm.batch.NewFloat64GaugeObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64CounterObserver calls `BatchObserver.NewInt64CounterObserver` and -// returns the instrument, panicking if it encounters an error. 
-func (bm BatchObserverMust) NewInt64CounterObserver(name string, oos ...InstrumentOption) Int64CounterObserver { - if inst, err := bm.batch.NewInt64CounterObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64CounterObserver calls `BatchObserver.NewFloat64CounterObserver` and -// returns the instrument, panicking if it encounters an error. -func (bm BatchObserverMust) NewFloat64CounterObserver(name string, oos ...InstrumentOption) Float64CounterObserver { - if inst, err := bm.batch.NewFloat64CounterObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewInt64UpDownCounterObserver calls `BatchObserver.NewInt64UpDownCounterObserver` and -// returns the instrument, panicking if it encounters an error. -func (bm BatchObserverMust) NewInt64UpDownCounterObserver(name string, oos ...InstrumentOption) Int64UpDownCounterObserver { - if inst, err := bm.batch.NewInt64UpDownCounterObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} - -// NewFloat64UpDownCounterObserver calls `BatchObserver.NewFloat64UpDownCounterObserver` and -// returns the instrument, panicking if it encounters an error. -func (bm BatchObserverMust) NewFloat64UpDownCounterObserver(name string, oos ...InstrumentOption) Float64UpDownCounterObserver { - if inst, err := bm.batch.NewFloat64UpDownCounterObserver(name, oos...); err != nil { - panic(err) - } else { - return inst - } -} diff --git a/vendor/go.opentelemetry.io/otel/metric/metric_instrument.go b/vendor/go.opentelemetry.io/otel/metric/metric_instrument.go deleted file mode 100644 index 2da24c8f211ef..0000000000000 --- a/vendor/go.opentelemetry.io/otel/metric/metric_instrument.go +++ /dev/null @@ -1,464 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metric // import "go.opentelemetry.io/otel/metric" - -import ( - "context" - "errors" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/number" - "go.opentelemetry.io/otel/metric/sdkapi" -) - -// ErrSDKReturnedNilImpl is returned when a new `MeterImpl` returns nil. -var ErrSDKReturnedNilImpl = errors.New("SDK returned a nil implementation") - -// Int64ObserverFunc is a type of callback that integral -// observers run. -type Int64ObserverFunc func(context.Context, Int64ObserverResult) - -// Float64ObserverFunc is a type of callback that floating point -// observers run. -type Float64ObserverFunc func(context.Context, Float64ObserverResult) - -// BatchObserverFunc is a callback argument for use with any -// Observer instrument that will be reported as a batch of -// observations. -type BatchObserverFunc func(context.Context, BatchObserverResult) - -// Int64ObserverResult is passed to an observer callback to capture -// observations for one asynchronous integer metric instrument. 
-type Int64ObserverResult struct { - instrument sdkapi.AsyncImpl - function func([]attribute.KeyValue, ...Observation) -} - -// Float64ObserverResult is passed to an observer callback to capture -// observations for one asynchronous floating point metric instrument. -type Float64ObserverResult struct { - instrument sdkapi.AsyncImpl - function func([]attribute.KeyValue, ...Observation) -} - -// BatchObserverResult is passed to a batch observer callback to -// capture observations for multiple asynchronous instruments. -type BatchObserverResult struct { - function func([]attribute.KeyValue, ...Observation) -} - -// Observe captures a single integer value from the associated -// instrument callback, with the given labels. -func (ir Int64ObserverResult) Observe(value int64, labels ...attribute.KeyValue) { - ir.function(labels, sdkapi.NewObservation(ir.instrument, number.NewInt64Number(value))) -} - -// Observe captures a single floating point value from the associated -// instrument callback, with the given labels. -func (fr Float64ObserverResult) Observe(value float64, labels ...attribute.KeyValue) { - fr.function(labels, sdkapi.NewObservation(fr.instrument, number.NewFloat64Number(value))) -} - -// Observe captures a multiple observations from the associated batch -// instrument callback, with the given labels. -func (br BatchObserverResult) Observe(labels []attribute.KeyValue, obs ...Observation) { - br.function(labels, obs...) -} - -var _ sdkapi.AsyncSingleRunner = (*Int64ObserverFunc)(nil) -var _ sdkapi.AsyncSingleRunner = (*Float64ObserverFunc)(nil) -var _ sdkapi.AsyncBatchRunner = (*BatchObserverFunc)(nil) - -// newInt64AsyncRunner returns a single-observer callback for integer Observer instruments. -func newInt64AsyncRunner(c Int64ObserverFunc) sdkapi.AsyncSingleRunner { - return &c -} - -// newFloat64AsyncRunner returns a single-observer callback for floating point Observer instruments. -func newFloat64AsyncRunner(c Float64ObserverFunc) sdkapi.AsyncSingleRunner { - return &c -} - -// newBatchAsyncRunner returns a batch-observer callback use with multiple Observer instruments. -func newBatchAsyncRunner(c BatchObserverFunc) sdkapi.AsyncBatchRunner { - return &c -} - -// AnyRunner implements AsyncRunner. -func (*Int64ObserverFunc) AnyRunner() {} - -// AnyRunner implements AsyncRunner. -func (*Float64ObserverFunc) AnyRunner() {} - -// AnyRunner implements AsyncRunner. -func (*BatchObserverFunc) AnyRunner() {} - -// Run implements AsyncSingleRunner. -func (i *Int64ObserverFunc) Run(ctx context.Context, impl sdkapi.AsyncImpl, function func([]attribute.KeyValue, ...Observation)) { - (*i)(ctx, Int64ObserverResult{ - instrument: impl, - function: function, - }) -} - -// Run implements AsyncSingleRunner. -func (f *Float64ObserverFunc) Run(ctx context.Context, impl sdkapi.AsyncImpl, function func([]attribute.KeyValue, ...Observation)) { - (*f)(ctx, Float64ObserverResult{ - instrument: impl, - function: function, - }) -} - -// Run implements AsyncBatchRunner. -func (b *BatchObserverFunc) Run(ctx context.Context, function func([]attribute.KeyValue, ...Observation)) { - (*b)(ctx, BatchObserverResult{ - function: function, - }) -} - -// wrapInt64GaugeObserverInstrument converts an AsyncImpl into Int64GaugeObserver. 
-func wrapInt64GaugeObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Int64GaugeObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Int64GaugeObserver{asyncInstrument: common}, err -} - -// wrapFloat64GaugeObserverInstrument converts an AsyncImpl into Float64GaugeObserver. -func wrapFloat64GaugeObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Float64GaugeObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Float64GaugeObserver{asyncInstrument: common}, err -} - -// wrapInt64CounterObserverInstrument converts an AsyncImpl into Int64CounterObserver. -func wrapInt64CounterObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Int64CounterObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Int64CounterObserver{asyncInstrument: common}, err -} - -// wrapFloat64CounterObserverInstrument converts an AsyncImpl into Float64CounterObserver. -func wrapFloat64CounterObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Float64CounterObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Float64CounterObserver{asyncInstrument: common}, err -} - -// wrapInt64UpDownCounterObserverInstrument converts an AsyncImpl into Int64UpDownCounterObserver. -func wrapInt64UpDownCounterObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Int64UpDownCounterObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Int64UpDownCounterObserver{asyncInstrument: common}, err -} - -// wrapFloat64UpDownCounterObserverInstrument converts an AsyncImpl into Float64UpDownCounterObserver. -func wrapFloat64UpDownCounterObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Float64UpDownCounterObserver, error) { - common, err := checkNewAsync(asyncInst, err) - return Float64UpDownCounterObserver{asyncInstrument: common}, err -} - -// BatchObserver represents an Observer callback that can report -// observations for multiple instruments. -type BatchObserver struct { - meter Meter - runner sdkapi.AsyncBatchRunner -} - -// Int64GaugeObserver is a metric that captures a set of int64 values at a -// point in time. -type Int64GaugeObserver struct { - asyncInstrument -} - -// Float64GaugeObserver is a metric that captures a set of float64 values -// at a point in time. -type Float64GaugeObserver struct { - asyncInstrument -} - -// Int64CounterObserver is a metric that captures a precomputed sum of -// int64 values at a point in time. -type Int64CounterObserver struct { - asyncInstrument -} - -// Float64CounterObserver is a metric that captures a precomputed sum of -// float64 values at a point in time. -type Float64CounterObserver struct { - asyncInstrument -} - -// Int64UpDownCounterObserver is a metric that captures a precomputed sum of -// int64 values at a point in time. -type Int64UpDownCounterObserver struct { - asyncInstrument -} - -// Float64UpDownCounterObserver is a metric that captures a precomputed sum of -// float64 values at a point in time. -type Float64UpDownCounterObserver struct { - asyncInstrument -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (i Int64GaugeObserver) Observation(v int64) Observation { - return sdkapi.NewObservation(i.instrument, number.NewInt64Number(v)) -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. 
-// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (f Float64GaugeObserver) Observation(v float64) Observation { - return sdkapi.NewObservation(f.instrument, number.NewFloat64Number(v)) -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (i Int64CounterObserver) Observation(v int64) Observation { - return sdkapi.NewObservation(i.instrument, number.NewInt64Number(v)) -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (f Float64CounterObserver) Observation(v float64) Observation { - return sdkapi.NewObservation(f.instrument, number.NewFloat64Number(v)) -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (i Int64UpDownCounterObserver) Observation(v int64) Observation { - return sdkapi.NewObservation(i.instrument, number.NewInt64Number(v)) -} - -// Observation returns an Observation, a BatchObserverFunc -// argument, for an asynchronous integer instrument. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (f Float64UpDownCounterObserver) Observation(v float64) Observation { - return sdkapi.NewObservation(f.instrument, number.NewFloat64Number(v)) -} - -// syncInstrument contains a SyncImpl. -type syncInstrument struct { - instrument sdkapi.SyncImpl -} - -// asyncInstrument contains a AsyncImpl. -type asyncInstrument struct { - instrument sdkapi.AsyncImpl -} - -// AsyncImpl implements AsyncImpl. -func (a asyncInstrument) AsyncImpl() sdkapi.AsyncImpl { - return a.instrument -} - -// SyncImpl returns the implementation object for synchronous instruments. -func (s syncInstrument) SyncImpl() sdkapi.SyncImpl { - return s.instrument -} - -func (s syncInstrument) float64Measurement(value float64) Measurement { - return sdkapi.NewMeasurement(s.instrument, number.NewFloat64Number(value)) -} - -func (s syncInstrument) int64Measurement(value int64) Measurement { - return sdkapi.NewMeasurement(s.instrument, number.NewInt64Number(value)) -} - -func (s syncInstrument) directRecord(ctx context.Context, number number.Number, labels []attribute.KeyValue) { - s.instrument.RecordOne(ctx, number, labels) -} - -// checkNewAsync receives an AsyncImpl and potential -// error, and returns the same types, checking for and ensuring that -// the returned interface is not nil. -func checkNewAsync(instrument sdkapi.AsyncImpl, err error) (asyncInstrument, error) { - if instrument == nil { - if err == nil { - err = ErrSDKReturnedNilImpl - } - instrument = sdkapi.NewNoopAsyncInstrument() - } - return asyncInstrument{ - instrument: instrument, - }, err -} - -// checkNewSync receives an SyncImpl and potential -// error, and returns the same types, checking for and ensuring that -// the returned interface is not nil. 
-func checkNewSync(instrument sdkapi.SyncImpl, err error) (syncInstrument, error) { - if instrument == nil { - if err == nil { - err = ErrSDKReturnedNilImpl - } - // Note: an alternate behavior would be to synthesize a new name - // or group all duplicately-named instruments of a certain type - // together and use a tag for the original name, e.g., - // name = 'invalid.counter.int64' - // label = 'original-name=duplicate-counter-name' - instrument = sdkapi.NewNoopSyncInstrument() - } - return syncInstrument{ - instrument: instrument, - }, err -} - -// wrapInt64CounterInstrument converts a SyncImpl into Int64Counter. -func wrapInt64CounterInstrument(syncInst sdkapi.SyncImpl, err error) (Int64Counter, error) { - common, err := checkNewSync(syncInst, err) - return Int64Counter{syncInstrument: common}, err -} - -// wrapFloat64CounterInstrument converts a SyncImpl into Float64Counter. -func wrapFloat64CounterInstrument(syncInst sdkapi.SyncImpl, err error) (Float64Counter, error) { - common, err := checkNewSync(syncInst, err) - return Float64Counter{syncInstrument: common}, err -} - -// wrapInt64UpDownCounterInstrument converts a SyncImpl into Int64UpDownCounter. -func wrapInt64UpDownCounterInstrument(syncInst sdkapi.SyncImpl, err error) (Int64UpDownCounter, error) { - common, err := checkNewSync(syncInst, err) - return Int64UpDownCounter{syncInstrument: common}, err -} - -// wrapFloat64UpDownCounterInstrument converts a SyncImpl into Float64UpDownCounter. -func wrapFloat64UpDownCounterInstrument(syncInst sdkapi.SyncImpl, err error) (Float64UpDownCounter, error) { - common, err := checkNewSync(syncInst, err) - return Float64UpDownCounter{syncInstrument: common}, err -} - -// wrapInt64HistogramInstrument converts a SyncImpl into Int64Histogram. -func wrapInt64HistogramInstrument(syncInst sdkapi.SyncImpl, err error) (Int64Histogram, error) { - common, err := checkNewSync(syncInst, err) - return Int64Histogram{syncInstrument: common}, err -} - -// wrapFloat64HistogramInstrument converts a SyncImpl into Float64Histogram. -func wrapFloat64HistogramInstrument(syncInst sdkapi.SyncImpl, err error) (Float64Histogram, error) { - common, err := checkNewSync(syncInst, err) - return Float64Histogram{syncInstrument: common}, err -} - -// Float64Counter is a metric that accumulates float64 values. -type Float64Counter struct { - syncInstrument -} - -// Int64Counter is a metric that accumulates int64 values. -type Int64Counter struct { - syncInstrument -} - -// Measurement creates a Measurement object to use with batch -// recording. -func (c Float64Counter) Measurement(value float64) Measurement { - return c.float64Measurement(value) -} - -// Measurement creates a Measurement object to use with batch -// recording. -func (c Int64Counter) Measurement(value int64) Measurement { - return c.int64Measurement(value) -} - -// Add adds the value to the counter's sum. The labels should contain -// the keys and values to be associated with this value. -func (c Float64Counter) Add(ctx context.Context, value float64, labels ...attribute.KeyValue) { - c.directRecord(ctx, number.NewFloat64Number(value), labels) -} - -// Add adds the value to the counter's sum. The labels should contain -// the keys and values to be associated with this value. -func (c Int64Counter) Add(ctx context.Context, value int64, labels ...attribute.KeyValue) { - c.directRecord(ctx, number.NewInt64Number(value), labels) -} - -// Float64UpDownCounter is a metric instrument that sums floating -// point values. 
-type Float64UpDownCounter struct { - syncInstrument -} - -// Int64UpDownCounter is a metric instrument that sums integer values. -type Int64UpDownCounter struct { - syncInstrument -} - -// Measurement creates a Measurement object to use with batch -// recording. -func (c Float64UpDownCounter) Measurement(value float64) Measurement { - return c.float64Measurement(value) -} - -// Measurement creates a Measurement object to use with batch -// recording. -func (c Int64UpDownCounter) Measurement(value int64) Measurement { - return c.int64Measurement(value) -} - -// Add adds the value to the counter's sum. The labels should contain -// the keys and values to be associated with this value. -func (c Float64UpDownCounter) Add(ctx context.Context, value float64, labels ...attribute.KeyValue) { - c.directRecord(ctx, number.NewFloat64Number(value), labels) -} - -// Add adds the value to the counter's sum. The labels should contain -// the keys and values to be associated with this value. -func (c Int64UpDownCounter) Add(ctx context.Context, value int64, labels ...attribute.KeyValue) { - c.directRecord(ctx, number.NewInt64Number(value), labels) -} - -// Float64Histogram is a metric that records float64 values. -type Float64Histogram struct { - syncInstrument -} - -// Int64Histogram is a metric that records int64 values. -type Int64Histogram struct { - syncInstrument -} - -// Measurement creates a Measurement object to use with batch -// recording. -func (c Float64Histogram) Measurement(value float64) Measurement { - return c.float64Measurement(value) -} - -// Measurement creates a Measurement object to use with batch -// recording. -func (c Int64Histogram) Measurement(value int64) Measurement { - return c.int64Measurement(value) -} - -// Record adds a new value to the list of Histogram's records. The -// labels should contain the keys and values to be associated with -// this value. -func (c Float64Histogram) Record(ctx context.Context, value float64, labels ...attribute.KeyValue) { - c.directRecord(ctx, number.NewFloat64Number(value), labels) -} - -// Record adds a new value to the Histogram's distribution. The -// labels should contain the keys and values to be associated with -// this value. -func (c Int64Histogram) Record(ctx context.Context, value int64, labels ...attribute.KeyValue) { - c.directRecord(ctx, number.NewInt64Number(value), labels) -} diff --git a/vendor/go.opentelemetry.io/otel/metric/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop.go index 37c653f51a1e2..e8b9a9a14588b 100644 --- a/vendor/go.opentelemetry.io/otel/metric/noop.go +++ b/vendor/go.opentelemetry.io/otel/metric/noop.go @@ -14,17 +14,168 @@ package metric // import "go.opentelemetry.io/otel/metric" -type noopMeterProvider struct{} +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/instrument" + "go.opentelemetry.io/otel/metric/instrument/asyncfloat64" + "go.opentelemetry.io/otel/metric/instrument/asyncint64" + "go.opentelemetry.io/otel/metric/instrument/syncfloat64" + "go.opentelemetry.io/otel/metric/instrument/syncint64" +) -// NewNoopMeterProvider returns an implementation of MeterProvider that -// performs no operations. The Meter and Instrument created from the returned -// MeterProvider also perform no operations. +// NewNoopMeterProvider creates a MeterProvider that does not record any metrics. 
func NewNoopMeterProvider() MeterProvider { return noopMeterProvider{} } -var _ MeterProvider = noopMeterProvider{} +type noopMeterProvider struct{} + +func (noopMeterProvider) Meter(string, ...MeterOption) Meter { + return noopMeter{} +} + +// NewNoopMeter creates a Meter that does not record any metrics. +func NewNoopMeter() Meter { + return noopMeter{} +} + +type noopMeter struct{} + +// AsyncInt64 creates an instrument that does not record any metrics. +func (noopMeter) AsyncInt64() asyncint64.InstrumentProvider { + return nonrecordingAsyncInt64Instrument{} +} + +// AsyncFloat64 creates an instrument that does not record any metrics. +func (noopMeter) AsyncFloat64() asyncfloat64.InstrumentProvider { + return nonrecordingAsyncFloat64Instrument{} +} + +// SyncInt64 creates an instrument that does not record any metrics. +func (noopMeter) SyncInt64() syncint64.InstrumentProvider { + return nonrecordingSyncInt64Instrument{} +} + +// SyncFloat64 creates an instrument that does not record any metrics. +func (noopMeter) SyncFloat64() syncfloat64.InstrumentProvider { + return nonrecordingSyncFloat64Instrument{} +} + +// RegisterCallback creates a register callback that does not record any metrics. +func (noopMeter) RegisterCallback([]instrument.Asynchronous, func(context.Context)) error { + return nil +} + +type nonrecordingAsyncFloat64Instrument struct { + instrument.Asynchronous +} + +var ( + _ asyncfloat64.InstrumentProvider = nonrecordingAsyncFloat64Instrument{} + _ asyncfloat64.Counter = nonrecordingAsyncFloat64Instrument{} + _ asyncfloat64.UpDownCounter = nonrecordingAsyncFloat64Instrument{} + _ asyncfloat64.Gauge = nonrecordingAsyncFloat64Instrument{} +) + +func (n nonrecordingAsyncFloat64Instrument) Counter(string, ...instrument.Option) (asyncfloat64.Counter, error) { + return n, nil +} + +func (n nonrecordingAsyncFloat64Instrument) UpDownCounter(string, ...instrument.Option) (asyncfloat64.UpDownCounter, error) { + return n, nil +} + +func (n nonrecordingAsyncFloat64Instrument) Gauge(string, ...instrument.Option) (asyncfloat64.Gauge, error) { + return n, nil +} + +func (nonrecordingAsyncFloat64Instrument) Observe(context.Context, float64, ...attribute.KeyValue) { + +} + +type nonrecordingAsyncInt64Instrument struct { + instrument.Asynchronous +} + +var ( + _ asyncint64.InstrumentProvider = nonrecordingAsyncInt64Instrument{} + _ asyncint64.Counter = nonrecordingAsyncInt64Instrument{} + _ asyncint64.UpDownCounter = nonrecordingAsyncInt64Instrument{} + _ asyncint64.Gauge = nonrecordingAsyncInt64Instrument{} +) -func (noopMeterProvider) Meter(instrumentationName string, opts ...MeterOption) Meter { - return Meter{} +func (n nonrecordingAsyncInt64Instrument) Counter(string, ...instrument.Option) (asyncint64.Counter, error) { + return n, nil +} + +func (n nonrecordingAsyncInt64Instrument) UpDownCounter(string, ...instrument.Option) (asyncint64.UpDownCounter, error) { + return n, nil +} + +func (n nonrecordingAsyncInt64Instrument) Gauge(string, ...instrument.Option) (asyncint64.Gauge, error) { + return n, nil +} + +func (nonrecordingAsyncInt64Instrument) Observe(context.Context, int64, ...attribute.KeyValue) { +} + +type nonrecordingSyncFloat64Instrument struct { + instrument.Synchronous +} + +var ( + _ syncfloat64.InstrumentProvider = nonrecordingSyncFloat64Instrument{} + _ syncfloat64.Counter = nonrecordingSyncFloat64Instrument{} + _ syncfloat64.UpDownCounter = nonrecordingSyncFloat64Instrument{} + _ syncfloat64.Histogram = nonrecordingSyncFloat64Instrument{} +) + +func (n 
nonrecordingSyncFloat64Instrument) Counter(string, ...instrument.Option) (syncfloat64.Counter, error) { + return n, nil +} + +func (n nonrecordingSyncFloat64Instrument) UpDownCounter(string, ...instrument.Option) (syncfloat64.UpDownCounter, error) { + return n, nil +} + +func (n nonrecordingSyncFloat64Instrument) Histogram(string, ...instrument.Option) (syncfloat64.Histogram, error) { + return n, nil +} + +func (nonrecordingSyncFloat64Instrument) Add(context.Context, float64, ...attribute.KeyValue) { + +} + +func (nonrecordingSyncFloat64Instrument) Record(context.Context, float64, ...attribute.KeyValue) { + +} + +type nonrecordingSyncInt64Instrument struct { + instrument.Synchronous +} + +var ( + _ syncint64.InstrumentProvider = nonrecordingSyncInt64Instrument{} + _ syncint64.Counter = nonrecordingSyncInt64Instrument{} + _ syncint64.UpDownCounter = nonrecordingSyncInt64Instrument{} + _ syncint64.Histogram = nonrecordingSyncInt64Instrument{} +) + +func (n nonrecordingSyncInt64Instrument) Counter(string, ...instrument.Option) (syncint64.Counter, error) { + return n, nil +} + +func (n nonrecordingSyncInt64Instrument) UpDownCounter(string, ...instrument.Option) (syncint64.UpDownCounter, error) { + return n, nil +} + +func (n nonrecordingSyncInt64Instrument) Histogram(string, ...instrument.Option) (syncint64.Histogram, error) { + return n, nil +} + +func (nonrecordingSyncInt64Instrument) Add(context.Context, int64, ...attribute.KeyValue) { +} +func (nonrecordingSyncInt64Instrument) Record(context.Context, int64, ...attribute.KeyValue) { } diff --git a/vendor/go.opentelemetry.io/otel/metric/number/kind_string.go b/vendor/go.opentelemetry.io/otel/metric/number/kind_string.go deleted file mode 100644 index 6288c7ea295f7..0000000000000 --- a/vendor/go.opentelemetry.io/otel/metric/number/kind_string.go +++ /dev/null @@ -1,24 +0,0 @@ -// Code generated by "stringer -type=Kind"; DO NOT EDIT. - -package number - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[Int64Kind-0] - _ = x[Float64Kind-1] -} - -const _Kind_name = "Int64KindFloat64Kind" - -var _Kind_index = [...]uint8{0, 9, 20} - -func (i Kind) String() string { - if i < 0 || i >= Kind(len(_Kind_index)-1) { - return "Kind(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Kind_name[_Kind_index[i]:_Kind_index[i+1]] -} diff --git a/vendor/go.opentelemetry.io/otel/metric/number/number.go b/vendor/go.opentelemetry.io/otel/metric/number/number.go deleted file mode 100644 index 3ec95e2014d7f..0000000000000 --- a/vendor/go.opentelemetry.io/otel/metric/number/number.go +++ /dev/null @@ -1,538 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package number // import "go.opentelemetry.io/otel/metric/number" - -//go:generate stringer -type=Kind - -import ( - "fmt" - "math" - "sync/atomic" - - "go.opentelemetry.io/otel/internal" -) - -// Kind describes the data type of the Number. -type Kind int8 - -const ( - // Int64Kind means that the Number stores int64. - Int64Kind Kind = iota - // Float64Kind means that the Number stores float64. - Float64Kind -) - -// Zero returns a zero value for a given Kind -func (k Kind) Zero() Number { - switch k { - case Int64Kind: - return NewInt64Number(0) - case Float64Kind: - return NewFloat64Number(0.) - default: - return Number(0) - } -} - -// Minimum returns the minimum representable value -// for a given Kind -func (k Kind) Minimum() Number { - switch k { - case Int64Kind: - return NewInt64Number(math.MinInt64) - case Float64Kind: - return NewFloat64Number(-1. * math.MaxFloat64) - default: - return Number(0) - } -} - -// Maximum returns the maximum representable value -// for a given Kind -func (k Kind) Maximum() Number { - switch k { - case Int64Kind: - return NewInt64Number(math.MaxInt64) - case Float64Kind: - return NewFloat64Number(math.MaxFloat64) - default: - return Number(0) - } -} - -// Number represents either an integral or a floating point value. It -// needs to be accompanied with a source of Kind that describes -// the actual type of the value stored within Number. -type Number uint64 - -// - constructors - -// NewNumberFromRaw creates a new Number from a raw value. -func NewNumberFromRaw(r uint64) Number { - return Number(r) -} - -// NewInt64Number creates an integral Number. -func NewInt64Number(i int64) Number { - return NewNumberFromRaw(internal.Int64ToRaw(i)) -} - -// NewFloat64Number creates a floating point Number. -func NewFloat64Number(f float64) Number { - return NewNumberFromRaw(internal.Float64ToRaw(f)) -} - -// NewNumberSignChange returns a number with the same magnitude and -// the opposite sign. `kind` must describe the kind of number in `nn`. -func NewNumberSignChange(kind Kind, nn Number) Number { - switch kind { - case Int64Kind: - return NewInt64Number(-nn.AsInt64()) - case Float64Kind: - return NewFloat64Number(-nn.AsFloat64()) - } - return nn -} - -// - as x - -// AsNumber gets the Number. -func (n *Number) AsNumber() Number { - return *n -} - -// AsRaw gets the uninterpreted raw value. Might be useful for some -// atomic operations. -func (n *Number) AsRaw() uint64 { - return uint64(*n) -} - -// AsInt64 assumes that the value contains an int64 and returns it as -// such. -func (n *Number) AsInt64() int64 { - return internal.RawToInt64(n.AsRaw()) -} - -// AsFloat64 assumes that the measurement value contains a float64 and -// returns it as such. -func (n *Number) AsFloat64() float64 { - return internal.RawToFloat64(n.AsRaw()) -} - -// - as x atomic - -// AsNumberAtomic gets the Number atomically. -func (n *Number) AsNumberAtomic() Number { - return NewNumberFromRaw(n.AsRawAtomic()) -} - -// AsRawAtomic gets the uninterpreted raw value atomically. Might be -// useful for some atomic operations. -func (n *Number) AsRawAtomic() uint64 { - return atomic.LoadUint64(n.AsRawPtr()) -} - -// AsInt64Atomic assumes that the number contains an int64 and returns -// it as such atomically. -func (n *Number) AsInt64Atomic() int64 { - return atomic.LoadInt64(n.AsInt64Ptr()) -} - -// AsFloat64Atomic assumes that the measurement value contains a -// float64 and returns it as such atomically. 
-func (n *Number) AsFloat64Atomic() float64 { - return internal.RawToFloat64(n.AsRawAtomic()) -} - -// - as x ptr - -// AsRawPtr gets the pointer to the raw, uninterpreted raw -// value. Might be useful for some atomic operations. -func (n *Number) AsRawPtr() *uint64 { - return (*uint64)(n) -} - -// AsInt64Ptr assumes that the number contains an int64 and returns a -// pointer to it. -func (n *Number) AsInt64Ptr() *int64 { - return internal.RawPtrToInt64Ptr(n.AsRawPtr()) -} - -// AsFloat64Ptr assumes that the number contains a float64 and returns a -// pointer to it. -func (n *Number) AsFloat64Ptr() *float64 { - return internal.RawPtrToFloat64Ptr(n.AsRawPtr()) -} - -// - coerce - -// CoerceToInt64 casts the number to int64. May result in -// data/precision loss. -func (n *Number) CoerceToInt64(kind Kind) int64 { - switch kind { - case Int64Kind: - return n.AsInt64() - case Float64Kind: - return int64(n.AsFloat64()) - default: - // you get what you deserve - return 0 - } -} - -// CoerceToFloat64 casts the number to float64. May result in -// data/precision loss. -func (n *Number) CoerceToFloat64(kind Kind) float64 { - switch kind { - case Int64Kind: - return float64(n.AsInt64()) - case Float64Kind: - return n.AsFloat64() - default: - // you get what you deserve - return 0 - } -} - -// - set - -// SetNumber sets the number to the passed number. Both should be of -// the same kind. -func (n *Number) SetNumber(nn Number) { - *n.AsRawPtr() = nn.AsRaw() -} - -// SetRaw sets the number to the passed raw value. Both number and the -// raw number should represent the same kind. -func (n *Number) SetRaw(r uint64) { - *n.AsRawPtr() = r -} - -// SetInt64 assumes that the number contains an int64 and sets it to -// the passed value. -func (n *Number) SetInt64(i int64) { - *n.AsInt64Ptr() = i -} - -// SetFloat64 assumes that the number contains a float64 and sets it -// to the passed value. -func (n *Number) SetFloat64(f float64) { - *n.AsFloat64Ptr() = f -} - -// - set atomic - -// SetNumberAtomic sets the number to the passed number -// atomically. Both should be of the same kind. -func (n *Number) SetNumberAtomic(nn Number) { - atomic.StoreUint64(n.AsRawPtr(), nn.AsRaw()) -} - -// SetRawAtomic sets the number to the passed raw value -// atomically. Both number and the raw number should represent the -// same kind. -func (n *Number) SetRawAtomic(r uint64) { - atomic.StoreUint64(n.AsRawPtr(), r) -} - -// SetInt64Atomic assumes that the number contains an int64 and sets -// it to the passed value atomically. -func (n *Number) SetInt64Atomic(i int64) { - atomic.StoreInt64(n.AsInt64Ptr(), i) -} - -// SetFloat64Atomic assumes that the number contains a float64 and -// sets it to the passed value atomically. -func (n *Number) SetFloat64Atomic(f float64) { - atomic.StoreUint64(n.AsRawPtr(), internal.Float64ToRaw(f)) -} - -// - swap - -// SwapNumber sets the number to the passed number and returns the old -// number. Both this number and the passed number should be of the -// same kind. -func (n *Number) SwapNumber(nn Number) Number { - old := *n - n.SetNumber(nn) - return old -} - -// SwapRaw sets the number to the passed raw value and returns the old -// raw value. Both number and the raw number should represent the same -// kind. -func (n *Number) SwapRaw(r uint64) uint64 { - old := n.AsRaw() - n.SetRaw(r) - return old -} - -// SwapInt64 assumes that the number contains an int64, sets it to the -// passed value and returns the old int64 value. 
-func (n *Number) SwapInt64(i int64) int64 { - old := n.AsInt64() - n.SetInt64(i) - return old -} - -// SwapFloat64 assumes that the number contains an float64, sets it to -// the passed value and returns the old float64 value. -func (n *Number) SwapFloat64(f float64) float64 { - old := n.AsFloat64() - n.SetFloat64(f) - return old -} - -// - swap atomic - -// SwapNumberAtomic sets the number to the passed number and returns -// the old number atomically. Both this number and the passed number -// should be of the same kind. -func (n *Number) SwapNumberAtomic(nn Number) Number { - return NewNumberFromRaw(atomic.SwapUint64(n.AsRawPtr(), nn.AsRaw())) -} - -// SwapRawAtomic sets the number to the passed raw value and returns -// the old raw value atomically. Both number and the raw number should -// represent the same kind. -func (n *Number) SwapRawAtomic(r uint64) uint64 { - return atomic.SwapUint64(n.AsRawPtr(), r) -} - -// SwapInt64Atomic assumes that the number contains an int64, sets it -// to the passed value and returns the old int64 value atomically. -func (n *Number) SwapInt64Atomic(i int64) int64 { - return atomic.SwapInt64(n.AsInt64Ptr(), i) -} - -// SwapFloat64Atomic assumes that the number contains an float64, sets -// it to the passed value and returns the old float64 value -// atomically. -func (n *Number) SwapFloat64Atomic(f float64) float64 { - return internal.RawToFloat64(atomic.SwapUint64(n.AsRawPtr(), internal.Float64ToRaw(f))) -} - -// - add - -// AddNumber assumes that this and the passed number are of the passed -// kind and adds the passed number to this number. -func (n *Number) AddNumber(kind Kind, nn Number) { - switch kind { - case Int64Kind: - n.AddInt64(nn.AsInt64()) - case Float64Kind: - n.AddFloat64(nn.AsFloat64()) - } -} - -// AddRaw assumes that this number and the passed raw value are of the -// passed kind and adds the passed raw value to this number. -func (n *Number) AddRaw(kind Kind, r uint64) { - n.AddNumber(kind, NewNumberFromRaw(r)) -} - -// AddInt64 assumes that the number contains an int64 and adds the -// passed int64 to it. -func (n *Number) AddInt64(i int64) { - *n.AsInt64Ptr() += i -} - -// AddFloat64 assumes that the number contains a float64 and adds the -// passed float64 to it. -func (n *Number) AddFloat64(f float64) { - *n.AsFloat64Ptr() += f -} - -// - add atomic - -// AddNumberAtomic assumes that this and the passed number are of the -// passed kind and adds the passed number to this number atomically. -func (n *Number) AddNumberAtomic(kind Kind, nn Number) { - switch kind { - case Int64Kind: - n.AddInt64Atomic(nn.AsInt64()) - case Float64Kind: - n.AddFloat64Atomic(nn.AsFloat64()) - } -} - -// AddRawAtomic assumes that this number and the passed raw value are -// of the passed kind and adds the passed raw value to this number -// atomically. -func (n *Number) AddRawAtomic(kind Kind, r uint64) { - n.AddNumberAtomic(kind, NewNumberFromRaw(r)) -} - -// AddInt64Atomic assumes that the number contains an int64 and adds -// the passed int64 to it atomically. -func (n *Number) AddInt64Atomic(i int64) { - atomic.AddInt64(n.AsInt64Ptr(), i) -} - -// AddFloat64Atomic assumes that the number contains a float64 and -// adds the passed float64 to it atomically. -func (n *Number) AddFloat64Atomic(f float64) { - for { - o := n.AsFloat64Atomic() - if n.CompareAndSwapFloat64(o, o+f) { - break - } - } -} - -// - compare and swap (atomic only) - -// CompareAndSwapNumber does the atomic CAS operation on this -// number. 
This number and passed old and new numbers should be of the -// same kind. -func (n *Number) CompareAndSwapNumber(on, nn Number) bool { - return atomic.CompareAndSwapUint64(n.AsRawPtr(), on.AsRaw(), nn.AsRaw()) -} - -// CompareAndSwapRaw does the atomic CAS operation on this -// number. This number and passed old and new raw values should be of -// the same kind. -func (n *Number) CompareAndSwapRaw(or, nr uint64) bool { - return atomic.CompareAndSwapUint64(n.AsRawPtr(), or, nr) -} - -// CompareAndSwapInt64 assumes that this number contains an int64 and -// does the atomic CAS operation on it. -func (n *Number) CompareAndSwapInt64(oi, ni int64) bool { - return atomic.CompareAndSwapInt64(n.AsInt64Ptr(), oi, ni) -} - -// CompareAndSwapFloat64 assumes that this number contains a float64 and -// does the atomic CAS operation on it. -func (n *Number) CompareAndSwapFloat64(of, nf float64) bool { - return atomic.CompareAndSwapUint64(n.AsRawPtr(), internal.Float64ToRaw(of), internal.Float64ToRaw(nf)) -} - -// - compare - -// CompareNumber compares two Numbers given their kind. Both numbers -// should have the same kind. This returns: -// 0 if the numbers are equal -// -1 if the subject `n` is less than the argument `nn` -// +1 if the subject `n` is greater than the argument `nn` -func (n *Number) CompareNumber(kind Kind, nn Number) int { - switch kind { - case Int64Kind: - return n.CompareInt64(nn.AsInt64()) - case Float64Kind: - return n.CompareFloat64(nn.AsFloat64()) - default: - // you get what you deserve - return 0 - } -} - -// CompareRaw compares two numbers, where one is input as a raw -// uint64, interpreting both values as a `kind` of number. -func (n *Number) CompareRaw(kind Kind, r uint64) int { - return n.CompareNumber(kind, NewNumberFromRaw(r)) -} - -// CompareInt64 assumes that the Number contains an int64 and performs -// a comparison between the value and the other value. It returns the -// typical result of the compare function: -1 if the value is less -// than the other, 0 if both are equal, 1 if the value is greater than -// the other. -func (n *Number) CompareInt64(i int64) int { - this := n.AsInt64() - if this < i { - return -1 - } else if this > i { - return 1 - } - return 0 -} - -// CompareFloat64 assumes that the Number contains a float64 and -// performs a comparison between the value and the other value. It -// returns the typical result of the compare function: -1 if the value -// is less than the other, 0 if both are equal, 1 if the value is -// greater than the other. -// -// Do not compare NaN values. -func (n *Number) CompareFloat64(f float64) int { - this := n.AsFloat64() - if this < f { - return -1 - } else if this > f { - return 1 - } - return 0 -} - -// - relations to zero - -// IsPositive returns true if the actual value is greater than zero. -func (n *Number) IsPositive(kind Kind) bool { - return n.compareWithZero(kind) > 0 -} - -// IsNegative returns true if the actual value is less than zero. -func (n *Number) IsNegative(kind Kind) bool { - return n.compareWithZero(kind) < 0 -} - -// IsZero returns true if the actual value is equal to zero. -func (n *Number) IsZero(kind Kind) bool { - return n.compareWithZero(kind) == 0 -} - -// - misc - -// Emit returns a string representation of the raw value of the -// Number. A %d is used for integral values, %f for floating point -// values. 
-func (n *Number) Emit(kind Kind) string { - switch kind { - case Int64Kind: - return fmt.Sprintf("%d", n.AsInt64()) - case Float64Kind: - return fmt.Sprintf("%f", n.AsFloat64()) - default: - return "" - } -} - -// AsInterface returns the number as an interface{}, typically used -// for Kind-correct JSON conversion. -func (n *Number) AsInterface(kind Kind) interface{} { - switch kind { - case Int64Kind: - return n.AsInt64() - case Float64Kind: - return n.AsFloat64() - default: - return math.NaN() - } -} - -// - private stuff - -func (n *Number) compareWithZero(kind Kind) int { - switch kind { - case Int64Kind: - return n.CompareInt64(0) - case Float64Kind: - return n.CompareFloat64(0.) - default: - // you get what you deserve - return 0 - } -} diff --git a/vendor/go.opentelemetry.io/otel/metric/sdkapi/descriptor.go b/vendor/go.opentelemetry.io/otel/metric/sdkapi/descriptor.go deleted file mode 100644 index 14eb0532e45a9..0000000000000 --- a/vendor/go.opentelemetry.io/otel/metric/sdkapi/descriptor.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sdkapi // import "go.opentelemetry.io/otel/metric/sdkapi" - -import ( - "go.opentelemetry.io/otel/metric/number" - "go.opentelemetry.io/otel/metric/unit" -) - -// Descriptor contains all the settings that describe an instrument, -// including its name, metric kind, number kind, and the configurable -// options. -type Descriptor struct { - name string - instrumentKind InstrumentKind - numberKind number.Kind - description string - unit unit.Unit -} - -// NewDescriptor returns a Descriptor with the given contents. -func NewDescriptor(name string, ikind InstrumentKind, nkind number.Kind, description string, unit unit.Unit) Descriptor { - return Descriptor{ - name: name, - instrumentKind: ikind, - numberKind: nkind, - description: description, - unit: unit, - } -} - -// Name returns the metric instrument's name. -func (d Descriptor) Name() string { - return d.name -} - -// InstrumentKind returns the specific kind of instrument. -func (d Descriptor) InstrumentKind() InstrumentKind { - return d.instrumentKind -} - -// Description provides a human-readable description of the metric -// instrument. -func (d Descriptor) Description() string { - return d.description -} - -// Unit describes the units of the metric instrument. Unitless -// metrics return the empty string. -func (d Descriptor) Unit() unit.Unit { - return d.unit -} - -// NumberKind returns whether this instrument is declared over int64, -// float64, or uint64 values. 
-func (d Descriptor) NumberKind() number.Kind { - return d.numberKind -} diff --git a/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind.go b/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind.go deleted file mode 100644 index 64aa5ead123cd..0000000000000 --- a/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:generate stringer -type=InstrumentKind - -package sdkapi // import "go.opentelemetry.io/otel/metric/sdkapi" - -// InstrumentKind describes the kind of instrument. -type InstrumentKind int8 - -const ( - // HistogramInstrumentKind indicates a Histogram instrument. - HistogramInstrumentKind InstrumentKind = iota - // GaugeObserverInstrumentKind indicates an GaugeObserver instrument. - GaugeObserverInstrumentKind - - // CounterInstrumentKind indicates a Counter instrument. - CounterInstrumentKind - // UpDownCounterInstrumentKind indicates a UpDownCounter instrument. - UpDownCounterInstrumentKind - - // CounterObserverInstrumentKind indicates a CounterObserver instrument. - CounterObserverInstrumentKind - // UpDownCounterObserverInstrumentKind indicates a UpDownCounterObserver - // instrument. - UpDownCounterObserverInstrumentKind -) - -// Synchronous returns whether this is a synchronous kind of instrument. -func (k InstrumentKind) Synchronous() bool { - switch k { - case CounterInstrumentKind, UpDownCounterInstrumentKind, HistogramInstrumentKind: - return true - } - return false -} - -// Asynchronous returns whether this is an asynchronous kind of instrument. -func (k InstrumentKind) Asynchronous() bool { - return !k.Synchronous() -} - -// Adding returns whether this kind of instrument adds its inputs (as opposed to Grouping). -func (k InstrumentKind) Adding() bool { - switch k { - case CounterInstrumentKind, UpDownCounterInstrumentKind, CounterObserverInstrumentKind, UpDownCounterObserverInstrumentKind: - return true - } - return false -} - -// Grouping returns whether this kind of instrument groups its inputs (as opposed to Adding). -func (k InstrumentKind) Grouping() bool { - return !k.Adding() -} - -// Monotonic returns whether this kind of instrument exposes a non-decreasing sum. -func (k InstrumentKind) Monotonic() bool { - switch k { - case CounterInstrumentKind, CounterObserverInstrumentKind: - return true - } - return false -} - -// PrecomputedSum returns whether this kind of instrument receives precomputed sums. -func (k InstrumentKind) PrecomputedSum() bool { - return k.Adding() && k.Asynchronous() -} diff --git a/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind_string.go b/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind_string.go deleted file mode 100644 index 3a2e79d823ef7..0000000000000 --- a/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind_string.go +++ /dev/null @@ -1,28 +0,0 @@ -// Code generated by "stringer -type=InstrumentKind"; DO NOT EDIT. 
- -package sdkapi - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[HistogramInstrumentKind-0] - _ = x[GaugeObserverInstrumentKind-1] - _ = x[CounterInstrumentKind-2] - _ = x[UpDownCounterInstrumentKind-3] - _ = x[CounterObserverInstrumentKind-4] - _ = x[UpDownCounterObserverInstrumentKind-5] -} - -const _InstrumentKind_name = "HistogramInstrumentKindGaugeObserverInstrumentKindCounterInstrumentKindUpDownCounterInstrumentKindCounterObserverInstrumentKindUpDownCounterObserverInstrumentKind" - -var _InstrumentKind_index = [...]uint8{0, 23, 50, 71, 98, 127, 162} - -func (i InstrumentKind) String() string { - if i < 0 || i >= InstrumentKind(len(_InstrumentKind_index)-1) { - return "InstrumentKind(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _InstrumentKind_name[_InstrumentKind_index[i]:_InstrumentKind_index[i+1]] -} diff --git a/vendor/go.opentelemetry.io/otel/metric/sdkapi/noop.go b/vendor/go.opentelemetry.io/otel/metric/sdkapi/noop.go deleted file mode 100644 index f22895dae6fdb..0000000000000 --- a/vendor/go.opentelemetry.io/otel/metric/sdkapi/noop.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sdkapi // import "go.opentelemetry.io/otel/metric/sdkapi" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/number" -) - -type noopInstrument struct { - descriptor Descriptor -} -type noopSyncInstrument struct{ noopInstrument } -type noopAsyncInstrument struct{ noopInstrument } - -var _ SyncImpl = noopSyncInstrument{} -var _ AsyncImpl = noopAsyncInstrument{} - -// NewNoopSyncInstrument returns a No-op implementation of the -// synchronous instrument interface. -func NewNoopSyncInstrument() SyncImpl { - return noopSyncInstrument{ - noopInstrument{ - descriptor: Descriptor{ - instrumentKind: CounterInstrumentKind, - }, - }, - } -} - -// NewNoopAsyncInstrument returns a No-op implementation of the -// asynchronous instrument interface. 
-func NewNoopAsyncInstrument() AsyncImpl { - return noopAsyncInstrument{ - noopInstrument{ - descriptor: Descriptor{ - instrumentKind: CounterObserverInstrumentKind, - }, - }, - } -} - -func (noopInstrument) Implementation() interface{} { - return nil -} - -func (n noopInstrument) Descriptor() Descriptor { - return n.descriptor -} - -func (noopSyncInstrument) RecordOne(context.Context, number.Number, []attribute.KeyValue) { -} diff --git a/vendor/go.opentelemetry.io/otel/metric/sdkapi/sdkapi.go b/vendor/go.opentelemetry.io/otel/metric/sdkapi/sdkapi.go deleted file mode 100644 index 36836364bdceb..0000000000000 --- a/vendor/go.opentelemetry.io/otel/metric/sdkapi/sdkapi.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sdkapi // import "go.opentelemetry.io/otel/metric/sdkapi" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/number" -) - -// MeterImpl is the interface an SDK must implement to supply a Meter -// implementation. -type MeterImpl interface { - // RecordBatch atomically records a batch of measurements. - RecordBatch(ctx context.Context, labels []attribute.KeyValue, measurement ...Measurement) - - // NewSyncInstrument returns a newly constructed - // synchronous instrument implementation or an error, should - // one occur. - NewSyncInstrument(descriptor Descriptor) (SyncImpl, error) - - // NewAsyncInstrument returns a newly constructed - // asynchronous instrument implementation or an error, should - // one occur. - NewAsyncInstrument( - descriptor Descriptor, - runner AsyncRunner, - ) (AsyncImpl, error) -} - -// InstrumentImpl is a common interface for synchronous and -// asynchronous instruments. -type InstrumentImpl interface { - // Implementation returns the underlying implementation of the - // instrument, which allows the implementation to gain access - // to its own representation especially from a `Measurement`. - Implementation() interface{} - - // Descriptor returns a copy of the instrument's Descriptor. - Descriptor() Descriptor -} - -// SyncImpl is the implementation-level interface to a generic -// synchronous instrument (e.g., Histogram and Counter instruments). -type SyncImpl interface { - InstrumentImpl - - // RecordOne captures a single synchronous metric event. - RecordOne(ctx context.Context, number number.Number, labels []attribute.KeyValue) -} - -// AsyncImpl is an implementation-level interface to an -// asynchronous instrument (e.g., Observer instruments). -type AsyncImpl interface { - InstrumentImpl -} - -// AsyncRunner is expected to convert into an AsyncSingleRunner or an -// AsyncBatchRunner. SDKs will encounter an error if the AsyncRunner -// does not satisfy one of these interfaces. -type AsyncRunner interface { - // AnyRunner is a non-exported method with no functional use - // other than to make this a non-empty interface. 
- AnyRunner() -} - -// AsyncSingleRunner is an interface implemented by single-observer -// callbacks. -type AsyncSingleRunner interface { - // Run accepts a single instrument and function for capturing - // observations of that instrument. Each call to the function - // receives one captured observation. (The function accepts - // multiple observations so the same implementation can be - // used for batch runners.) - Run(ctx context.Context, single AsyncImpl, capture func([]attribute.KeyValue, ...Observation)) - - AsyncRunner -} - -// AsyncBatchRunner is an interface implemented by batch-observer -// callbacks. -type AsyncBatchRunner interface { - // Run accepts a function for capturing observations of - // multiple instruments. - Run(ctx context.Context, capture func([]attribute.KeyValue, ...Observation)) - - AsyncRunner -} - -// NewMeasurement constructs a single observation, a binding between -// an asynchronous instrument and a number. -func NewMeasurement(instrument SyncImpl, number number.Number) Measurement { - return Measurement{ - instrument: instrument, - number: number, - } -} - -// Measurement is a low-level type used with synchronous instruments -// as a direct interface to the SDK via `RecordBatch`. -type Measurement struct { - // number needs to be aligned for 64-bit atomic operations. - number number.Number - instrument SyncImpl -} - -// SyncImpl returns the instrument that created this measurement. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (m Measurement) SyncImpl() SyncImpl { - return m.instrument -} - -// Number returns a number recorded in this measurement. -func (m Measurement) Number() number.Number { - return m.number -} - -// NewObservation constructs a single observation, a binding between -// an asynchronous instrument and a number. -func NewObservation(instrument AsyncImpl, number number.Number) Observation { - return Observation{ - instrument: instrument, - number: number, - } -} - -// Observation is a low-level type used with asynchronous instruments -// as a direct interface to the SDK via `BatchObserver`. -type Observation struct { - // number needs to be aligned for 64-bit atomic operations. - number number.Number - instrument AsyncImpl -} - -// AsyncImpl returns the instrument that created this observation. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (m Observation) AsyncImpl() AsyncImpl { - return m.instrument -} - -// Number returns a number recorded in this observation. -func (m Observation) Number() number.Number { - return m.number -} diff --git a/vendor/go.opentelemetry.io/otel/metric/unit/unit.go b/vendor/go.opentelemetry.io/otel/metric/unit/unit.go index 4615eb16f6990..647d77302de26 100644 --- a/vendor/go.opentelemetry.io/otel/metric/unit/unit.go +++ b/vendor/go.opentelemetry.io/otel/metric/unit/unit.go @@ -14,6 +14,7 @@ package unit // import "go.opentelemetry.io/otel/metric/unit" +// Unit is a determinate standard quantity of measurement. type Unit string // Units defined by OpenTelemetry. 
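Note on the rewritten metric/noop.go above: it replaces the old Observation/Measurement helpers with the instrument-provider style Meter surface (AsyncInt64, SyncFloat64, RegisterCallback). As a rough, hedged sketch of how that surface is consumed — the meter name "example" and the "requests" counter are made up for illustration — a caller can obtain a no-op meter and record through it even when no metrics SDK is installed:

package main

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	// The no-op meter satisfies the full Meter interface but records nothing,
	// so instrumented code keeps working when no metrics SDK is wired in.
	meter := metric.NewNoopMeterProvider().Meter("example")

	counter, err := meter.SyncInt64().Counter("requests")
	if err != nil {
		panic(err)
	}
	// Add is a no-op here; an SDK-backed meter would record the measurement.
	counter.Add(context.Background(), 1, attribute.String("handler", "ping"))
}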
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go new file mode 100644 index 0000000000000..6e923acab4328 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go @@ -0,0 +1,24 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package instrumentation provides types to represent the code libraries that +// provide OpenTelemetry instrumentation. These types are used in the +// OpenTelemetry signal pipelines to identify the source of telemetry. +// +// See +// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0083-component.md +// and +// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0201-scope-attributes.md +// for more information. +package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go index 6f0016169e302..39f025a1715bf 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go @@ -12,22 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -/* -Package instrumentation provides an instrumentation library structure to be -passed to both the OpenTelemetry Tracer and Meter components. - -For more information see -[this](https://github.com/open-telemetry/oteps/blob/main/text/0083-component.md). -*/ package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" // Library represents the instrumentation library. -type Library struct { - // Name is the name of the instrumentation library. This should be the - // Go package name of that library. - Name string - // Version is the version of the instrumentation library. - Version string - // SchemaURL of the telemetry emitted by the library. - SchemaURL string -} +// Deprecated: please use Scope instead. +type Library = Scope diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go new file mode 100644 index 0000000000000..09c6d93f6d095 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go @@ -0,0 +1,26 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" + +// Scope represents the instrumentation scope. +type Scope struct { + // Name is the name of the instrumentation scope. This should be the + // Go package name of that scope. + Name string + // Version is the version of the instrumentation scope. + Version string + // SchemaURL of the telemetry emitted by the scope. + SchemaURL string +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go index 397fd9593cccd..5e94b8ae521ad 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go @@ -21,27 +21,72 @@ import ( "go.opentelemetry.io/otel/internal/global" ) -// Environment variable names +// Environment variable names. const ( - // BatchSpanProcessorScheduleDelayKey - // Delay interval between two consecutive exports. - // i.e. 5000 + // BatchSpanProcessorScheduleDelayKey is the delay interval between two + // consecutive exports (i.e. 5000). BatchSpanProcessorScheduleDelayKey = "OTEL_BSP_SCHEDULE_DELAY" - // BatchSpanProcessorExportTimeoutKey - // Maximum allowed time to export data. - // i.e. 3000 + // BatchSpanProcessorExportTimeoutKey is the maximum allowed time to + // export data (i.e. 3000). BatchSpanProcessorExportTimeoutKey = "OTEL_BSP_EXPORT_TIMEOUT" - // BatchSpanProcessorMaxQueueSizeKey - // Maximum queue size - // i.e. 2048 + // BatchSpanProcessorMaxQueueSizeKey is the maximum queue size (i.e. 2048). BatchSpanProcessorMaxQueueSizeKey = "OTEL_BSP_MAX_QUEUE_SIZE" - // BatchSpanProcessorMaxExportBatchSizeKey - // Maximum batch size - // Note: Must be less than or equal to EnvBatchSpanProcessorMaxQueueSize - // i.e. 512 + // BatchSpanProcessorMaxExportBatchSizeKey is the maximum batch size (i.e. + // 512). Note: it must be less than or equal to + // EnvBatchSpanProcessorMaxQueueSize. BatchSpanProcessorMaxExportBatchSizeKey = "OTEL_BSP_MAX_EXPORT_BATCH_SIZE" + + // AttributeValueLengthKey is the maximum allowed attribute value size. + AttributeValueLengthKey = "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT" + + // AttributeCountKey is the maximum allowed span attribute count. + AttributeCountKey = "OTEL_ATTRIBUTE_COUNT_LIMIT" + + // SpanAttributeValueLengthKey is the maximum allowed attribute value size + // for a span. + SpanAttributeValueLengthKey = "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT" + + // SpanAttributeCountKey is the maximum allowed span attribute count for a + // span. + SpanAttributeCountKey = "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT" + + // SpanEventCountKey is the maximum allowed span event count. + SpanEventCountKey = "OTEL_SPAN_EVENT_COUNT_LIMIT" + + // SpanEventAttributeCountKey is the maximum allowed attribute per span + // event count. + SpanEventAttributeCountKey = "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT" + + // SpanLinkCountKey is the maximum allowed span link count. + SpanLinkCountKey = "OTEL_SPAN_LINK_COUNT_LIMIT" + + // SpanLinkAttributeCountKey is the maximum allowed attribute per span + // link count. + SpanLinkAttributeCountKey = "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT" ) +// firstInt returns the value of the first matching environment variable from +// keys. If the value is not an integer or no match is found, defaultValue is +// returned. 
+func firstInt(defaultValue int, keys ...string) int { + for _, key := range keys { + value, ok := os.LookupEnv(key) + if !ok { + continue + } + + intValue, err := strconv.Atoi(value) + if err != nil { + global.Info("Got invalid value, number value expected.", key, value) + return defaultValue + } + + return intValue + } + + return defaultValue +} + // IntEnvOr returns the int value of the environment variable with name key if // it exists and the value is an int. Otherwise, defaultValue is returned. func IntEnvOr(key string, defaultValue int) int { @@ -86,3 +131,47 @@ func BatchSpanProcessorMaxQueueSize(defaultValue int) int { func BatchSpanProcessorMaxExportBatchSize(defaultValue int) int { return IntEnvOr(BatchSpanProcessorMaxExportBatchSizeKey, defaultValue) } + +// SpanAttributeValueLength returns the environment variable value for the +// OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the +// environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT is +// returned or defaultValue if that is not set. +func SpanAttributeValueLength(defaultValue int) int { + return firstInt(defaultValue, SpanAttributeValueLengthKey, AttributeValueLengthKey) +} + +// SpanAttributeCount returns the environment variable value for the +// OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the +// environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT is returned or +// defaultValue if that is not set. +func SpanAttributeCount(defaultValue int) int { + return firstInt(defaultValue, SpanAttributeCountKey, AttributeCountKey) +} + +// SpanEventCount returns the environment variable value for the +// OTEL_SPAN_EVENT_COUNT_LIMIT key if it exists, otherwise defaultValue is +// returned. +func SpanEventCount(defaultValue int) int { + return IntEnvOr(SpanEventCountKey, defaultValue) +} + +// SpanEventAttributeCount returns the environment variable value for the +// OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue +// is returned. +func SpanEventAttributeCount(defaultValue int) int { + return IntEnvOr(SpanEventAttributeCountKey, defaultValue) +} + +// SpanLinkCount returns the environment variable value for the +// OTEL_SPAN_LINK_COUNT_LIMIT key if it exists, otherwise defaultValue is +// returned. +func SpanLinkCount(defaultValue int) int { + return IntEnvOr(SpanLinkCountKey, defaultValue) +} + +// SpanLinkAttributeCount returns the environment variable value for the +// OTEL_LINK_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue is +// returned. +func SpanLinkAttributeCount(defaultValue int) int { + return IntEnvOr(SpanLinkAttributeCountKey, defaultValue) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go index a5eaa7e5d3949..c1d220408ae4f 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go @@ -27,7 +27,7 @@ var ( ErrPartialResource = errors.New("partial resource") ) -// Detector detects OpenTelemetry resource information +// Detector detects OpenTelemetry resource information. type Detector interface { // DO NOT CHANGE: any modification will not be backwards compatible and // must never be done outside of a new major release. 
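The firstInt helper added above gives the span-specific limit variables precedence over the general OTEL_ATTRIBUTE_* ones before falling back to the supplied default. Because go.opentelemetry.io/otel/sdk/internal/env is an internal package and cannot be imported directly, the sketch below merely restates that lookup order with a hypothetical helper of its own (spanAttrValueLength) so the precedence is easy to see:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// spanAttrValueLength mirrors the precedence implemented by the vendored
// internal/env package: the span-specific variable wins, then the general
// one, then the default. The helper name is ours, not part of the SDK.
func spanAttrValueLength(def int) int {
	for _, key := range []string{
		"OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT",
		"OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT",
	} {
		if v, ok := os.LookupEnv(key); ok {
			if n, err := strconv.Atoi(v); err == nil {
				return n
			}
			return def // an invalid value falls back to the default
		}
	}
	return def
}

func main() {
	os.Setenv("OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT", "128")
	// Prints 128: the general limit applies when no span-specific limit is set.
	fmt.Println(spanAttrValueLength(-1))
}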
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go index 701eae40a39ef..34a474891a4cf 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -22,7 +22,7 @@ import ( "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" ) type ( @@ -92,7 +92,7 @@ func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) { return NewWithAttributes(sd.schemaURL, sd.K.String(value)), nil } -// Detect implements Detector +// Detect implements Detector. func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error) { return StringDetector( semconv.SchemaURL, diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go index d80b5ae621443..8e212b1218248 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go @@ -110,7 +110,16 @@ func WithOSDescription() Option { } // WithProcess adds all the Process attributes to the configured Resource. -// See individual WithProcess* functions to configure specific attributes. +// +// Warning! This option will include process command line arguments. If these +// contain sensitive information it will be included in the exported resource. +// +// This option is equivalent to calling WithProcessPID, +// WithProcessExecutableName, WithProcessExecutablePath, +// WithProcessCommandArgs, WithProcessOwner, WithProcessRuntimeName, +// WithProcessRuntimeVersion, and WithProcessRuntimeDescription. See each +// option function for information about what resource attributes each +// includes. func WithProcess() Option { return WithDetectors( processPIDDetector{}, @@ -143,7 +152,11 @@ func WithProcessExecutablePath() Option { } // WithProcessCommandArgs adds an attribute with all the command arguments (including -// the command/executable itself) as received by the process the configured Resource. +// the command/executable itself) as received by the process to the configured +// Resource. +// +// Warning! This option will include process command line arguments. If these +// contain sensitive information it will be included in the exported resource. func WithProcessCommandArgs() Option { return WithDetectors(processCommandArgsDetector{}) } @@ -171,3 +184,16 @@ func WithProcessRuntimeVersion() Option { func WithProcessRuntimeDescription() Option { return WithDetectors(processRuntimeDescriptionDetector{}) } + +// WithContainer adds all the Container attributes to the configured Resource. +// See individual WithContainer* functions to configure specific attributes. +func WithContainer() Option { + return WithDetectors( + cgroupContainerIDDetector{}, + ) +} + +// WithContainerID adds an attribute with the id of the container to the configured Resource. 
+func WithContainerID() Option { + return WithDetectors(cgroupContainerIDDetector{}) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go new file mode 100644 index 0000000000000..6f7fd005b7a3d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go @@ -0,0 +1,100 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "bufio" + "context" + "errors" + "io" + "os" + "regexp" + + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" +) + +type containerIDProvider func() (string, error) + +var ( + containerID containerIDProvider = getContainerIDFromCGroup + cgroupContainerIDRe = regexp.MustCompile(`^.*/(?:.*-)?([0-9a-f]+)(?:\.|\s*$)`) +) + +type cgroupContainerIDDetector struct{} + +const cgroupPath = "/proc/self/cgroup" + +// Detect returns a *Resource that describes the id of the container. +// If no container id found, an empty resource will be returned. +func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) { + containerID, err := containerID() + if err != nil { + return nil, err + } + + if containerID == "" { + return Empty(), nil + } + return NewWithAttributes(semconv.SchemaURL, semconv.ContainerIDKey.String(containerID)), nil +} + +var ( + defaultOSStat = os.Stat + osStat = defaultOSStat + + defaultOSOpen = func(name string) (io.ReadCloser, error) { + return os.Open(name) + } + osOpen = defaultOSOpen +) + +// getContainerIDFromCGroup returns the id of the container from the cgroup file. +// If no container id found, an empty string will be returned. +func getContainerIDFromCGroup() (string, error) { + if _, err := osStat(cgroupPath); errors.Is(err, os.ErrNotExist) { + // File does not exist, skip + return "", nil + } + + file, err := osOpen(cgroupPath) + if err != nil { + return "", err + } + defer file.Close() + + return getContainerIDFromReader(file), nil +} + +// getContainerIDFromReader returns the id of the container from reader. +func getContainerIDFromReader(reader io.Reader) string { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + line := scanner.Text() + + if id := getContainerIDFromLine(line); id != "" { + return id + } + } + return "" +} + +// getContainerIDFromLine returns the id of the container from one string line. 
+func getContainerIDFromLine(line string) string { + matches := cgroupContainerIDRe.FindStringSubmatch(line) + if len(matches) <= 1 { + return "" + } + return matches[1] +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go index 9392296cbabf4..deebe363a1d10 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go @@ -17,11 +17,13 @@ package resource // import "go.opentelemetry.io/otel/sdk/resource" import ( "context" "fmt" + "net/url" "os" "strings" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" ) const ( @@ -42,10 +44,10 @@ var ( // builtin. type fromEnv struct{} -// compile time assertion that FromEnv implements Detector interface +// compile time assertion that FromEnv implements Detector interface. var _ Detector = fromEnv{} -// Detect collects resources from environment +// Detect collects resources from environment. func (fromEnv) Detect(context.Context) (*Resource, error) { attrs := strings.TrimSpace(os.Getenv(resourceAttrKey)) svcName := strings.TrimSpace(os.Getenv(svcNameKey)) @@ -88,7 +90,14 @@ func constructOTResources(s string) (*Resource, error) { invalid = append(invalid, p) continue } - k, v := strings.TrimSpace(field[0]), strings.TrimSpace(field[1]) + k := strings.TrimSpace(field[0]) + v, err := url.QueryUnescape(strings.TrimSpace(field[1])) + if err != nil { + // Retain original value if decoding fails, otherwise it will be + // an empty string. + v = field[1] + otel.Handle(err) + } attrs = append(attrs, attribute.String(k, v)) } var err error diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go index 59329770cf0f4..ac520dd867357 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -19,7 +19,7 @@ import ( "strings" "go.opentelemetry.io/otel/attribute" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" ) type osDescriptionProvider func() (string, error) diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go index 42894a15b5ca4..1c84afc1852ef 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go @@ -18,7 +18,6 @@ package resource // import "go.opentelemetry.io/otel/sdk/resource" import ( - "bytes" "fmt" "os" @@ -69,23 +68,14 @@ func uname() (string, error) { } return fmt.Sprintf("%s %s %s %s %s", - charsToString(utsName.Sysname[:]), - charsToString(utsName.Nodename[:]), - charsToString(utsName.Release[:]), - charsToString(utsName.Version[:]), - charsToString(utsName.Machine[:]), + unix.ByteSliceToString(utsName.Sysname[:]), + unix.ByteSliceToString(utsName.Nodename[:]), + unix.ByteSliceToString(utsName.Release[:]), + unix.ByteSliceToString(utsName.Version[:]), + unix.ByteSliceToString(utsName.Machine[:]), ), nil } -// charsToString converts a C-like null-terminated char array to a Go string. -func charsToString(charArray []byte) string { - if i := bytes.IndexByte(charArray, 0); i >= 0 { - charArray = charArray[:i] - } - - return string(charArray) -} - // getFirstAvailableFile returns an *os.File of the first available // file from a list of candidate file paths. 
func getFirstAvailableFile(candidates []string) (*os.File, error) { diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go index 80d5e6993235a..7eaddd34bfda3 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go @@ -22,7 +22,7 @@ import ( "path/filepath" "runtime" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" ) type pidProvider func() int @@ -39,7 +39,12 @@ var ( defaultExecutablePathProvider executablePathProvider = os.Executable defaultCommandArgsProvider commandArgsProvider = func() []string { return os.Args } defaultOwnerProvider ownerProvider = user.Current - defaultRuntimeNameProvider runtimeNameProvider = func() string { return runtime.Compiler } + defaultRuntimeNameProvider runtimeNameProvider = func() string { + if runtime.Compiler == "gc" { + return "go" + } + return runtime.Compiler + } defaultRuntimeVersionProvider runtimeVersionProvider = runtime.Version defaultRuntimeOSProvider runtimeOSProvider = func() string { return runtime.GOOS } defaultRuntimeArchProvider runtimeArchProvider = func() string { return runtime.GOARCH } diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go index e842744ae9189..c425ff05db508 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go @@ -129,6 +129,7 @@ func (r *Resource) Attributes() []attribute.KeyValue { return r.attrs.ToSlice() } +// SchemaURL returns the schema URL associated with Resource r. func (r *Resource) SchemaURL() string { if r == nil { return "" @@ -179,13 +180,14 @@ func Merge(a, b *Resource) (*Resource, error) { // Merge the schema URL. var schemaURL string - if a.schemaURL == "" { + switch true { + case a.schemaURL == "": schemaURL = b.schemaURL - } else if b.schemaURL == "" { + case b.schemaURL == "": schemaURL = a.schemaURL - } else if a.schemaURL == b.schemaURL { + case a.schemaURL == b.schemaURL: schemaURL = a.schemaURL - } else { + default: return Empty(), errMergeConflictSchemaURL } @@ -194,7 +196,7 @@ func Merge(a, b *Resource) (*Resource, error) { mi := attribute.NewMergeIterator(b.Set(), a.Set()) combine := make([]attribute.KeyValue, 0, a.Len()+b.Len()) for mi.Next() { - combine = append(combine, mi.Label()) + combine = append(combine, mi.Attribute()) } merged := NewWithAttributes(schemaURL, combine...) return merged, nil diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index 67e2732c3c756..a2d7db4900111 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -35,8 +35,11 @@ const ( DefaultMaxExportBatchSize = 512 ) +// BatchSpanProcessorOption configures a BatchSpanProcessor. type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions) +// BatchSpanProcessorOptions is configuration settings for a +// BatchSpanProcessor. type BatchSpanProcessorOptions struct { // MaxQueueSize is the maximum queue size to buffer spans for delayed processing. If the // queue gets full it drops the spans. Use BlockOnQueueFull to change this behavior. 
@@ -181,7 +184,7 @@ func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error { var err error if bsp.e != nil { flushCh := make(chan struct{}) - if bsp.enqueueBlockOnQueueFull(ctx, forceFlushSpan{flushed: flushCh}, true) { + if bsp.enqueueBlockOnQueueFull(ctx, forceFlushSpan{flushed: flushCh}) { select { case <-flushCh: // Processed any items in queue prior to ForceFlush being called @@ -205,30 +208,43 @@ func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error { return err } +// WithMaxQueueSize returns a BatchSpanProcessorOption that configures the +// maximum queue size allowed for a BatchSpanProcessor. func WithMaxQueueSize(size int) BatchSpanProcessorOption { return func(o *BatchSpanProcessorOptions) { o.MaxQueueSize = size } } +// WithMaxExportBatchSize returns a BatchSpanProcessorOption that configures +// the maximum export batch size allowed for a BatchSpanProcessor. func WithMaxExportBatchSize(size int) BatchSpanProcessorOption { return func(o *BatchSpanProcessorOptions) { o.MaxExportBatchSize = size } } +// WithBatchTimeout returns a BatchSpanProcessorOption that configures the +// maximum delay allowed for a BatchSpanProcessor before it will export any +// held span (whether the queue is full or not). func WithBatchTimeout(delay time.Duration) BatchSpanProcessorOption { return func(o *BatchSpanProcessorOptions) { o.BatchTimeout = delay } } +// WithExportTimeout returns a BatchSpanProcessorOption that configures the +// amount of time a BatchSpanProcessor waits for an exporter to export before +// abandoning the export. func WithExportTimeout(timeout time.Duration) BatchSpanProcessorOption { return func(o *BatchSpanProcessorOptions) { o.ExportTimeout = timeout } } +// WithBlocking returns a BatchSpanProcessorOption that configures a +// BatchSpanProcessor to wait for enqueue operations to succeed instead of +// dropping data when the queue is full. func WithBlocking() BatchSpanProcessorOption { return func(o *BatchSpanProcessorOptions) { o.BlockOnQueueFull = true @@ -237,7 +253,6 @@ func WithBlocking() BatchSpanProcessorOption { // exportSpans is a subroutine of processing and draining the queue. func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { - bsp.timer.Reset(bsp.o.BatchTimeout) bsp.batchMutex.Lock() @@ -250,7 +265,7 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error { } if l := len(bsp.batch); l > 0 { - global.Debug("exporting spans", "count", len(bsp.batch), "dropped", atomic.LoadUint32(&bsp.dropped)) + global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped)) err := bsp.e.ExportSpans(ctx, bsp.batch) // A new batch is always created after exporting, even if the batch failed to be exported. 
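The newly documented batch-processor options compose as ordinary functional options; below is a minimal sketch (not part of the diff), with a no-op exporter included purely to keep it self-contained in place of a real exporter.

package main

import (
	"context"
	"time"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// nopExporter satisfies sdktrace.SpanExporter so the sketch compiles on its own.
type nopExporter struct{}

func (nopExporter) ExportSpans(context.Context, []sdktrace.ReadOnlySpan) error { return nil }
func (nopExporter) Shutdown(context.Context) error                             { return nil }

func main() {
	bsp := sdktrace.NewBatchSpanProcessor(nopExporter{},
		sdktrace.WithMaxQueueSize(2048),
		sdktrace.WithMaxExportBatchSize(512),
		sdktrace.WithBatchTimeout(5*time.Second),
		sdktrace.WithExportTimeout(30*time.Second),
		sdktrace.WithBlocking(), // block callers instead of dropping when the queue is full
	)

	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(bsp))
	defer func() { _ = tp.Shutdown(context.Background()) }()
}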
@@ -335,28 +350,35 @@ func (bsp *batchSpanProcessor) drainQueue() { } func (bsp *batchSpanProcessor) enqueue(sd ReadOnlySpan) { - bsp.enqueueBlockOnQueueFull(context.TODO(), sd, bsp.o.BlockOnQueueFull) + ctx := context.TODO() + if bsp.o.BlockOnQueueFull { + bsp.enqueueBlockOnQueueFull(ctx, sd) + } else { + bsp.enqueueDrop(ctx, sd) + } } -func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd ReadOnlySpan, block bool) bool { +func recoverSendOnClosedChan() { + x := recover() + switch err := x.(type) { + case nil: + return + case runtime.Error: + if err.Error() == "send on closed channel" { + return + } + } + panic(x) +} + +func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd ReadOnlySpan) bool { if !sd.SpanContext().IsSampled() { return false } // This ensures the bsp.queue<- below does not panic as the // processor shuts down. - defer func() { - x := recover() - switch err := x.(type) { - case nil: - return - case runtime.Error: - if err.Error() == "send on closed channel" { - return - } - } - panic(x) - }() + defer recoverSendOnClosedChan() select { case <-bsp.stopCh: @@ -364,13 +386,27 @@ func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd R default: } - if block { - select { - case bsp.queue <- sd: - return true - case <-ctx.Done(): - return false - } + select { + case bsp.queue <- sd: + return true + case <-ctx.Done(): + return false + } +} + +func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) bool { + if !sd.SpanContext().IsSampled() { + return false + } + + // This ensures the bsp.queue<- below does not panic as the + // processor shuts down. + defer recoverSendOnClosedChan() + + select { + case <-bsp.stopCh: + return false + default: } select { diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/config.go b/vendor/go.opentelemetry.io/otel/sdk/trace/config.go deleted file mode 100644 index 61a30439251f9..0000000000000 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/config.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package trace // import "go.opentelemetry.io/otel/sdk/trace" - -// SpanLimits represents the limits of a span. -type SpanLimits struct { - // AttributeCountLimit is the maximum allowed span attribute count. - AttributeCountLimit int - - // EventCountLimit is the maximum allowed span event count. - EventCountLimit int - - // LinkCountLimit is the maximum allowed span link count. - LinkCountLimit int - - // AttributePerEventCountLimit is the maximum allowed attribute per span event count. - AttributePerEventCountLimit int - - // AttributePerLinkCountLimit is the maximum allowed attribute per span link count. 
- AttributePerLinkCountLimit int -} - -func (sl *SpanLimits) ensureDefault() { - if sl.EventCountLimit <= 0 { - sl.EventCountLimit = DefaultEventCountLimit - } - if sl.AttributeCountLimit <= 0 { - sl.AttributeCountLimit = DefaultAttributeCountLimit - } - if sl.LinkCountLimit <= 0 { - sl.LinkCountLimit = DefaultLinkCountLimit - } - if sl.AttributePerEventCountLimit <= 0 { - sl.AttributePerEventCountLimit = DefaultAttributePerEventCountLimit - } - if sl.AttributePerLinkCountLimit <= 0 { - sl.AttributePerLinkCountLimit = DefaultAttributePerLinkCountLimit - } -} - -const ( - // DefaultAttributeCountLimit is the default maximum allowed span attribute count. - DefaultAttributeCountLimit = 128 - - // DefaultEventCountLimit is the default maximum allowed span event count. - DefaultEventCountLimit = 128 - - // DefaultLinkCountLimit is the default maximum allowed span link count. - DefaultLinkCountLimit = 128 - - // DefaultAttributePerEventCountLimit is the default maximum allowed attribute per span event count. - DefaultAttributePerEventCountLimit = 128 - - // DefaultAttributePerLinkCountLimit is the default maximum allowed attribute per span link count. - DefaultAttributePerLinkCountLimit = 128 -) diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go index 8e89e19d4b991..d1c86e59b22d5 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go @@ -29,7 +29,12 @@ func newEvictedQueue(capacity int) evictedQueue { // add adds value to the evictedQueue eq. If eq is at capacity, the oldest // queued value will be discarded and the drop count incremented. func (eq *evictedQueue) add(value interface{}) { - if len(eq.queue) == eq.capacity { + if eq.capacity == 0 { + eq.droppedCount++ + return + } + + if eq.capacity > 0 && len(eq.queue) == eq.capacity { // Drop first-in while avoiding allocating more capacity to eq.queue. copy(eq.queue[:eq.capacity-1], eq.queue[1:]) eq.queue = eq.queue[:eq.capacity-1] diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go index c9e2802ac5302..bba246041a447 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go @@ -52,7 +52,7 @@ func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.Trace gen.Lock() defer gen.Unlock() sid := trace.SpanID{} - gen.randSource.Read(sid[:]) + _, _ = gen.randSource.Read(sid[:]) return sid } @@ -62,9 +62,9 @@ func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace. gen.Lock() defer gen.Unlock() tid := trace.TraceID{} - gen.randSource.Read(tid[:]) + _, _ = gen.randSource.Read(tid[:]) sid := trace.SpanID{} - gen.randSource.Read(sid[:]) + _, _ = gen.randSource.Read(sid[:]) return tid, sid } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go index c6b311f9cdc88..201c17817004d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go @@ -31,7 +31,7 @@ const ( defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer" ) -// tracerProviderConfig +// tracerProviderConfig. type tracerProviderConfig struct { // processors contains collection of SpanProcessors that are processing pipeline // for spans in the trace signal. 
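The evictedqueue change above is what backs span event and link limits: a zero capacity now drops everything, while a positive capacity evicts the oldest entry. A sketch of the observable effect, using the span-limit options and the tracetest recorder that appear later in this diff; names and values are arbitrary and the snippet is not part of the diff.

package main

import (
	"context"
	"fmt"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/sdk/trace/tracetest"
)

func main() {
	limits := sdktrace.NewSpanLimits()
	limits.EventCountLimit = 2 // keep at most two events, evicting the oldest

	sr := tracetest.NewSpanRecorder()
	tp := sdktrace.NewTracerProvider(
		sdktrace.WithRawSpanLimits(limits),
		sdktrace.WithSpanProcessor(sr),
	)

	_, span := tp.Tracer("evict-demo").Start(context.Background(), "op")
	span.AddEvent("first")
	span.AddEvent("second")
	span.AddEvent("third") // "first" is evicted
	span.End()

	ro := sr.Ended()[0]
	fmt.Println(len(ro.Events()), ro.DroppedEvents()) // 2 1
	_ = tp.Shutdown(context.Background())
}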
@@ -70,10 +70,13 @@ func (cfg tracerProviderConfig) MarshalLog() interface{} { } } +// TracerProvider is an OpenTelemetry TracerProvider. It provides Tracers to +// instrumentation so it can trace operational flow through a system. type TracerProvider struct { mu sync.Mutex - namedTracer map[instrumentation.Library]*tracer + namedTracer map[instrumentation.Scope]*tracer spanProcessors atomic.Value + isShutdown bool // These fields are not protected by the lock mu. They are assumed to be // immutable after creation of the TracerProvider. @@ -88,15 +91,18 @@ var _ trace.TracerProvider = &TracerProvider{} // NewTracerProvider returns a new and configured TracerProvider. // // By default the returned TracerProvider is configured with: -// - a ParentBased(AlwaysSample) Sampler -// - a random number IDGenerator -// - the resource.Default() Resource -// - the default SpanLimits. +// - a ParentBased(AlwaysSample) Sampler +// - a random number IDGenerator +// - the resource.Default() Resource +// - the default SpanLimits. // // The passed opts are used to override these default values and configure the // returned TracerProvider appropriately. func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider { - o := tracerProviderConfig{} + o := tracerProviderConfig{ + spanLimits: NewSpanLimits(), + } + o = applyTracerProviderEnvConfigs(o) for _, opt := range opts { o = opt.apply(o) @@ -105,18 +111,19 @@ func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider { o = ensureValidTracerProviderConfig(o) tp := &TracerProvider{ - namedTracer: make(map[instrumentation.Library]*tracer), + namedTracer: make(map[instrumentation.Scope]*tracer), sampler: o.sampler, idGenerator: o.idGenerator, spanLimits: o.spanLimits, resource: o.resource, } - global.Info("TracerProvider created", "config", o) + spss := spanProcessorStates{} for _, sp := range o.processors { - tp.RegisterSpanProcessor(sp) + spss = append(spss, newSpanProcessorState(sp)) } + tp.spanProcessors.Store(spss) return tp } @@ -136,62 +143,62 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T if name == "" { name = defaultTracerName } - il := instrumentation.Library{ + is := instrumentation.Scope{ Name: name, Version: c.InstrumentationVersion(), SchemaURL: c.SchemaURL(), } - t, ok := p.namedTracer[il] + t, ok := p.namedTracer[is] if !ok { t = &tracer{ - provider: p, - instrumentationLibrary: il, + provider: p, + instrumentationScope: is, } - p.namedTracer[il] = t + p.namedTracer[is] = t global.Info("Tracer created", "name", name, "version", c.InstrumentationVersion(), "schemaURL", c.SchemaURL()) } return t } -// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors -func (p *TracerProvider) RegisterSpanProcessor(s SpanProcessor) { +// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors. +func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) { p.mu.Lock() defer p.mu.Unlock() - new := spanProcessorStates{} - if old, ok := p.spanProcessors.Load().(spanProcessorStates); ok { - new = append(new, old...) - } - newSpanSync := &spanProcessorState{ - sp: s, - state: &sync.Once{}, + if p.isShutdown { + return } - new = append(new, newSpanSync) - p.spanProcessors.Store(new) + newSPS := spanProcessorStates{} + newSPS = append(newSPS, p.spanProcessors.Load().(spanProcessorStates)...) 
+ newSPS = append(newSPS, newSpanProcessorState(sp)) + p.spanProcessors.Store(newSPS) } -// UnregisterSpanProcessor removes the given SpanProcessor from the list of SpanProcessors -func (p *TracerProvider) UnregisterSpanProcessor(s SpanProcessor) { +// UnregisterSpanProcessor removes the given SpanProcessor from the list of SpanProcessors. +func (p *TracerProvider) UnregisterSpanProcessor(sp SpanProcessor) { p.mu.Lock() defer p.mu.Unlock() - spss := spanProcessorStates{} - old, ok := p.spanProcessors.Load().(spanProcessorStates) - if !ok || len(old) == 0 { + if p.isShutdown { return } + old := p.spanProcessors.Load().(spanProcessorStates) + if len(old) == 0 { + return + } + spss := spanProcessorStates{} spss = append(spss, old...) // stop the span processor if it is started and remove it from the list var stopOnce *spanProcessorState var idx int for i, sps := range spss { - if sps.sp == s { + if sps.sp == sp { stopOnce = sps idx = i } } if stopOnce != nil { stopOnce.state.Do(func() { - if err := s.Shutdown(context.Background()); err != nil { + if err := sp.Shutdown(context.Background()); err != nil { otel.Handle(err) } }) @@ -208,10 +215,7 @@ func (p *TracerProvider) UnregisterSpanProcessor(s SpanProcessor) { // ForceFlush immediately exports all spans that have not yet been exported for // all the registered span processors. func (p *TracerProvider) ForceFlush(ctx context.Context) error { - spss, ok := p.spanProcessors.Load().(spanProcessorStates) - if !ok { - return fmt.Errorf("failed to load span processors") - } + spss := p.spanProcessors.Load().(spanProcessorStates) if len(spss) == 0 { return nil } @@ -230,16 +234,19 @@ func (p *TracerProvider) ForceFlush(ctx context.Context) error { return nil } -// Shutdown shuts down the span processors in the order they were registered. +// Shutdown shuts down TracerProvider. All registered span processors are shut down +// in the order they were registered and any held computational resources are released. func (p *TracerProvider) Shutdown(ctx context.Context) error { - spss, ok := p.spanProcessors.Load().(spanProcessorStates) - if !ok { - return fmt.Errorf("failed to load span processors") - } + spss := p.spanProcessors.Load().(spanProcessorStates) if len(spss) == 0 { return nil } + p.mu.Lock() + defer p.mu.Unlock() + p.isShutdown = true + + var retErr error for _, sps := range spss { select { case <-ctx.Done(): @@ -252,12 +259,19 @@ func (p *TracerProvider) Shutdown(ctx context.Context) error { err = sps.sp.Shutdown(ctx) }) if err != nil { - return err + if retErr == nil { + retErr = err + } else { + // Poor man's list of errors + retErr = fmt.Errorf("%v; %v", retErr, err) + } } } - return nil + p.spanProcessors.Store(spanProcessorStates{}) + return retErr } +// TracerProviderOption configures a TracerProvider. type TracerProviderOption interface { apply(tracerProviderConfig) tracerProviderConfig } @@ -333,7 +347,10 @@ func WithIDGenerator(g IDGenerator) TracerProviderOption { // Tracers the TracerProvider creates to make their sampling decisions for the // Spans they create. // -// If this option is not used, the TracerProvider will use a +// This option overrides the Sampler configured through the OTEL_TRACES_SAMPLER +// and OTEL_TRACES_SAMPLER_ARG environment variables. If this option is not used +// and the sampler is not configured through environment variables or the environment +// contains invalid/unsupported configuration, the TracerProvider will use a // ParentBased(AlwaysSample) Sampler by default. 
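Given the precedence spelled out above, a short sketch of the two ways a sampler can now be chosen; the ratio is arbitrary and the snippet is not part of the diff.

package main

import (
	"os"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// Environment-driven: NewTracerProvider consults OTEL_TRACES_SAMPLER
	// and OTEL_TRACES_SAMPLER_ARG before applying explicit options.
	os.Setenv("OTEL_TRACES_SAMPLER", "parentbased_traceidratio")
	os.Setenv("OTEL_TRACES_SAMPLER_ARG", "0.25")
	fromEnv := sdktrace.NewTracerProvider()

	// An explicit WithSampler overrides whatever the environment says.
	explicit := sdktrace.NewTracerProvider(
		sdktrace.WithSampler(sdktrace.ParentBased(sdktrace.AlwaysSample())),
	)
	_, _ = fromEnv, explicit
}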
func WithSampler(s Sampler) TracerProviderOption { return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { @@ -344,20 +361,91 @@ func WithSampler(s Sampler) TracerProviderOption { }) } -// WithSpanLimits returns a TracerProviderOption that will configure the -// SpanLimits sl as a TracerProvider's SpanLimits. The configured SpanLimits -// are used used by the Tracers the TracerProvider and the Spans they create -// to limit tracing resources used. +// WithSpanLimits returns a TracerProviderOption that configures a +// TracerProvider to use the SpanLimits sl. These SpanLimits bound any Span +// created by a Tracer from the TracerProvider. +// +// If any field of sl is zero or negative it will be replaced with the default +// value for that field. +// +// If this or WithRawSpanLimits are not provided, the TracerProvider will use +// the limits defined by environment variables, or the defaults if unset. +// Refer to the NewSpanLimits documentation for information about this +// relationship. // -// If this option is not used, the TracerProvider will use the default -// SpanLimits. +// Deprecated: Use WithRawSpanLimits instead which allows setting unlimited +// and zero limits. This option will be kept until the next major version +// incremented release. func WithSpanLimits(sl SpanLimits) TracerProviderOption { + if sl.AttributeValueLengthLimit <= 0 { + sl.AttributeValueLengthLimit = DefaultAttributeValueLengthLimit + } + if sl.AttributeCountLimit <= 0 { + sl.AttributeCountLimit = DefaultAttributeCountLimit + } + if sl.EventCountLimit <= 0 { + sl.EventCountLimit = DefaultEventCountLimit + } + if sl.AttributePerEventCountLimit <= 0 { + sl.AttributePerEventCountLimit = DefaultAttributePerEventCountLimit + } + if sl.LinkCountLimit <= 0 { + sl.LinkCountLimit = DefaultLinkCountLimit + } + if sl.AttributePerLinkCountLimit <= 0 { + sl.AttributePerLinkCountLimit = DefaultAttributePerLinkCountLimit + } return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { cfg.spanLimits = sl return cfg }) } +// WithRawSpanLimits returns a TracerProviderOption that configures a +// TracerProvider to use these limits. These limits bound any Span created by +// a Tracer from the TracerProvider. +// +// The limits will be used as-is. Zero or negative values will not be changed +// to the default value like WithSpanLimits does. Setting a limit to zero will +// effectively disable the related resource it limits and setting to a +// negative value will mean that resource is unlimited. Consequentially, this +// means that the zero-value SpanLimits will disable all span resources. +// Because of this, limits should be constructed using NewSpanLimits and +// updated accordingly. +// +// If this or WithSpanLimits are not provided, the TracerProvider will use the +// limits defined by environment variables, or the defaults if unset. Refer to +// the NewSpanLimits documentation for information about this relationship. 
+func WithRawSpanLimits(limits SpanLimits) TracerProviderOption { + return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig { + cfg.spanLimits = limits + return cfg + }) +} + +func applyTracerProviderEnvConfigs(cfg tracerProviderConfig) tracerProviderConfig { + for _, opt := range tracerProviderOptionsFromEnv() { + cfg = opt.apply(cfg) + } + + return cfg +} + +func tracerProviderOptionsFromEnv() []TracerProviderOption { + var opts []TracerProviderOption + + sampler, err := samplerFromEnv() + if err != nil { + otel.Handle(err) + } + + if sampler != nil { + opts = append(opts, WithSampler(sampler)) + } + + return opts +} + // ensureValidTracerProviderConfig ensures that given TracerProviderConfig is valid. func ensureValidTracerProviderConfig(cfg tracerProviderConfig) tracerProviderConfig { if cfg.sampler == nil { @@ -366,7 +454,6 @@ func ensureValidTracerProviderConfig(cfg tracerProviderConfig) tracerProviderCon if cfg.idGenerator == nil { cfg.idGenerator = defaultIDGenerator() } - cfg.spanLimits.ensureDefault() if cfg.resource == nil { cfg.resource = resource.Default() } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go new file mode 100644 index 0000000000000..02053b318aeb4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go @@ -0,0 +1,108 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "errors" + "fmt" + "os" + "strconv" + "strings" +) + +const ( + tracesSamplerKey = "OTEL_TRACES_SAMPLER" + tracesSamplerArgKey = "OTEL_TRACES_SAMPLER_ARG" + + samplerAlwaysOn = "always_on" + samplerAlwaysOff = "always_off" + samplerTraceIDRatio = "traceidratio" + samplerParentBasedAlwaysOn = "parentbased_always_on" + samplerParsedBasedAlwaysOff = "parentbased_always_off" + samplerParentBasedTraceIDRatio = "parentbased_traceidratio" +) + +type errUnsupportedSampler string + +func (e errUnsupportedSampler) Error() string { + return fmt.Sprintf("unsupported sampler: %s", string(e)) +} + +var ( + errNegativeTraceIDRatio = errors.New("invalid trace ID ratio: less than 0.0") + errGreaterThanOneTraceIDRatio = errors.New("invalid trace ID ratio: greater than 1.0") +) + +type samplerArgParseError struct { + parseErr error +} + +func (e samplerArgParseError) Error() string { + return fmt.Sprintf("parsing sampler argument: %s", e.parseErr.Error()) +} + +func (e samplerArgParseError) Unwrap() error { + return e.parseErr +} + +func samplerFromEnv() (Sampler, error) { + sampler, ok := os.LookupEnv(tracesSamplerKey) + if !ok { + return nil, nil + } + + sampler = strings.ToLower(strings.TrimSpace(sampler)) + samplerArg, hasSamplerArg := os.LookupEnv(tracesSamplerArgKey) + samplerArg = strings.TrimSpace(samplerArg) + + switch sampler { + case samplerAlwaysOn: + return AlwaysSample(), nil + case samplerAlwaysOff: + return NeverSample(), nil + case samplerTraceIDRatio: + if !hasSamplerArg { + return TraceIDRatioBased(1.0), nil + } + return parseTraceIDRatio(samplerArg) + case samplerParentBasedAlwaysOn: + return ParentBased(AlwaysSample()), nil + case samplerParsedBasedAlwaysOff: + return ParentBased(NeverSample()), nil + case samplerParentBasedTraceIDRatio: + if !hasSamplerArg { + return ParentBased(TraceIDRatioBased(1.0)), nil + } + ratio, err := parseTraceIDRatio(samplerArg) + return ParentBased(ratio), err + default: + return nil, errUnsupportedSampler(sampler) + } +} + +func parseTraceIDRatio(arg string) (Sampler, error) { + v, err := strconv.ParseFloat(arg, 64) + if err != nil { + return TraceIDRatioBased(1.0), samplerArgParseError{err} + } + if v < 0.0 { + return TraceIDRatioBased(1.0), errNegativeTraceIDRatio + } + if v > 1.0 { + return TraceIDRatioBased(1.0), errGreaterThanOneTraceIDRatio + } + + return TraceIDRatioBased(v), nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go index a4ac588f66683..5ee9715d27bb9 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go @@ -53,17 +53,17 @@ type SamplingParameters struct { // SamplingDecision indicates whether a span is dropped, recorded and/or sampled. type SamplingDecision uint8 -// Valid sampling decisions +// Valid sampling decisions. const ( - // Drop will not record the span and all attributes/events will be dropped + // Drop will not record the span and all attributes/events will be dropped. Drop SamplingDecision = iota // Record indicates the span's `IsRecording() == true`, but `Sampled` flag - // *must not* be set + // *must not* be set. RecordOnly // RecordAndSample has span's `IsRecording() == true` and `Sampled` flag - // *must* be set + // *must* be set. 
RecordAndSample ) @@ -81,7 +81,7 @@ type traceIDRatioSampler struct { func (ts traceIDRatioSampler) ShouldSample(p SamplingParameters) SamplingResult { psc := trace.SpanContextFromContext(p.ParentContext) - x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1 + x := binary.BigEndian.Uint64(p.TraceID[8:16]) >> 1 if x < ts.traceIDUpperBound { return SamplingResult{ Decision: RecordAndSample, @@ -102,6 +102,7 @@ func (ts traceIDRatioSampler) Description() string { // always sample. Fractions < 0 are treated as zero. To respect the // parent trace's `SampledFlag`, the `TraceIDRatioBased` sampler should be used // as a delegate of a `Parent` sampler. +// //nolint:revive // revive complains about stutter of `trace.TraceIDRatioBased` func TraceIDRatioBased(fraction float64) Sampler { if fraction >= 1 { @@ -162,10 +163,10 @@ func NeverSample() Sampler { // the root(Sampler) is used to make sampling decision. If the span has // a parent, depending on whether the parent is remote and whether it // is sampled, one of the following samplers will apply: -// - remoteParentSampled(Sampler) (default: AlwaysOn) -// - remoteParentNotSampled(Sampler) (default: AlwaysOff) -// - localParentSampled(Sampler) (default: AlwaysOn) -// - localParentNotSampled(Sampler) (default: AlwaysOff) +// - remoteParentSampled(Sampler) (default: AlwaysOn) +// - remoteParentNotSampled(Sampler) (default: AlwaysOff) +// - localParentSampled(Sampler) (default: AlwaysOn) +// - localParentNotSampled(Sampler) (default: AlwaysOff) func ParentBased(root Sampler, samplers ...ParentBasedSamplerOption) Sampler { return parentBased{ root: root, diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go index d31d3c1caff6f..e8530a9593295 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go @@ -116,7 +116,7 @@ func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error { return nil } -// MarshalLog is the marshaling function used by the logging system to represent this exporter. +// MarshalLog is the marshaling function used by the logging system to represent this Span Processor. func (ssp *simpleSpanProcessor) MarshalLog() interface{} { return struct { Type string diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go index 53aac61f5fe1d..0349b2f198e4d 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go @@ -26,22 +26,22 @@ import ( // snapshot is an record of a spans state at a particular checkpointed time. // It is used as a read-only representation of that state. 
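Each of the four ParentBased delegates documented in sampling.go above has a corresponding option; a sketch of overriding one of them (values arbitrary, not part of the diff):

package main

import sdktrace "go.opentelemetry.io/otel/sdk/trace"

func main() {
	// Root spans are sampled at 10%; children normally follow their
	// parent, but the remote-parent-sampled delegate is overridden here
	// purely to show the option hook.
	sampler := sdktrace.ParentBased(
		sdktrace.TraceIDRatioBased(0.10),
		sdktrace.WithRemoteParentSampled(sdktrace.TraceIDRatioBased(0.50)),
	)
	_ = sdktrace.NewTracerProvider(sdktrace.WithSampler(sampler))
}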
type snapshot struct { - name string - spanContext trace.SpanContext - parent trace.SpanContext - spanKind trace.SpanKind - startTime time.Time - endTime time.Time - attributes []attribute.KeyValue - events []Event - links []Link - status Status - childSpanCount int - droppedAttributeCount int - droppedEventCount int - droppedLinkCount int - resource *resource.Resource - instrumentationLibrary instrumentation.Library + name string + spanContext trace.SpanContext + parent trace.SpanContext + spanKind trace.SpanKind + startTime time.Time + endTime time.Time + attributes []attribute.KeyValue + events []Event + links []Link + status Status + childSpanCount int + droppedAttributeCount int + droppedEventCount int + droppedLinkCount int + resource *resource.Resource + instrumentationScope instrumentation.Scope } var _ ReadOnlySpan = snapshot{} @@ -102,10 +102,16 @@ func (s snapshot) Status() Status { return s.status } +// InstrumentationScope returns information about the instrumentation +// scope that created the span. +func (s snapshot) InstrumentationScope() instrumentation.Scope { + return s.instrumentationScope +} + // InstrumentationLibrary returns information about the instrumentation // library that created the span. func (s snapshot) InstrumentationLibrary() instrumentation.Library { - return s.instrumentationLibrary + return s.instrumentationScope } // Resource returns information about the entity that produced the span. diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go index 779cde691ddfc..5abb0b274d4e5 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go @@ -20,15 +20,17 @@ import ( "reflect" "runtime" rt "runtime/trace" + "strings" "sync" "time" + "unicode/utf8" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/sdk/instrumentation" "go.opentelemetry.io/otel/sdk/internal" "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" "go.opentelemetry.io/otel/trace" ) @@ -63,8 +65,12 @@ type ReadOnlySpan interface { Events() []Event // Status returns the spans status. Status() Status + // InstrumentationScope returns information about the instrumentation + // scope that created the span. + InstrumentationScope() instrumentation.Scope // InstrumentationLibrary returns information about the instrumentation // library that created the span. + // Deprecated: please use InstrumentationScope instead. InstrumentationLibrary() instrumentation.Library // Resource returns information about the entity that produced the span. Resource() *resource.Resource @@ -183,15 +189,18 @@ func (s *recordingSpan) SetStatus(code codes.Code, description string) { if !s.IsRecording() { return } + s.mu.Lock() + defer s.mu.Unlock() + if s.status.Code > code { + return + } status := Status{Code: code} if code == codes.Error { status.Description = description } - s.mu.Lock() s.status = status - s.mu.Unlock() } // SetAttributes sets attributes of this span. @@ -212,10 +221,17 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) { s.mu.Lock() defer s.mu.Unlock() + limit := s.tracer.provider.spanLimits.AttributeCountLimit + if limit == 0 { + // No attributes allowed. 
+ s.droppedAttributes += len(attributes) + return + } + // If adding these attributes could exceed the capacity of s perform a // de-duplication and truncation while adding to avoid over allocation. - if len(s.attributes)+len(attributes) > s.tracer.provider.spanLimits.AttributeCountLimit { - s.addOverCapAttrs(attributes) + if limit > 0 && len(s.attributes)+len(attributes) > limit { + s.addOverCapAttrs(limit, attributes) return } @@ -227,21 +243,25 @@ func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) { s.droppedAttributes++ continue } + a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a) s.attributes = append(s.attributes, a) } } // addOverCapAttrs adds the attributes attrs to the span s while // de-duplicating the attributes of s and attrs and dropping attributes that -// exceed the capacity of s. +// exceed the limit. // // This method assumes s.mu.Lock is held by the caller. // // This method should only be called when there is a possibility that adding -// attrs to s will exceed the capacity of s. Otherwise, attrs should be added -// to s without checking for duplicates and all retrieval methods of the -// attributes for s will de-duplicate as needed. -func (s *recordingSpan) addOverCapAttrs(attrs []attribute.KeyValue) { +// attrs to s will exceed the limit. Otherwise, attrs should be added to s +// without checking for duplicates and all retrieval methods of the attributes +// for s will de-duplicate as needed. +// +// This method assumes limit is a value > 0. The argument should be validated +// by the caller. +func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) { // In order to not allocate more capacity to s.attributes than needed, // prune and truncate this addition of attributes while adding. @@ -265,17 +285,73 @@ func (s *recordingSpan) addOverCapAttrs(attrs []attribute.KeyValue) { continue } - if len(s.attributes) >= s.tracer.provider.spanLimits.AttributeCountLimit { + if len(s.attributes) >= limit { // Do not just drop all of the remaining attributes, make sure // updates are checked and performed. s.droppedAttributes++ } else { + a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a) s.attributes = append(s.attributes, a) exists[a.Key] = len(s.attributes) - 1 } } } +// truncateAttr returns a truncated version of attr. Only string and string +// slice attribute values are truncated. String values are truncated to at +// most a length of limit. Each string slice value is truncated in this fashion +// (the slice length itself is unaffected). +// +// No truncation is perfromed for a negative limit. +func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue { + if limit < 0 { + return attr + } + switch attr.Value.Type() { + case attribute.STRING: + if v := attr.Value.AsString(); len(v) > limit { + return attr.Key.String(safeTruncate(v, limit)) + } + case attribute.STRINGSLICE: + v := attr.Value.AsStringSlice() + for i := range v { + if len(v[i]) > limit { + v[i] = safeTruncate(v[i], limit) + } + } + return attr.Key.StringSlice(v) + } + return attr +} + +// safeTruncate truncates the string and guarantees valid UTF-8 is returned. +func safeTruncate(input string, limit int) string { + if trunc, ok := safeTruncateValidUTF8(input, limit); ok { + return trunc + } + trunc, _ := safeTruncateValidUTF8(strings.ToValidUTF8(input, ""), limit) + return trunc +} + +// safeTruncateValidUTF8 returns a copy of the input string safely truncated to +// limit. 
The truncation is ensured to occur at the bounds of complete UTF-8 +// characters. If invalid encoding of UTF-8 is encountered, input is returned +// with false, otherwise, the truncated input will be returned with true. +func safeTruncateValidUTF8(input string, limit int) (string, bool) { + for cnt := 0; cnt <= limit; { + r, size := utf8.DecodeRuneInString(input[cnt:]) + if r == utf8.RuneError { + return input, false + } + + if cnt+size > limit { + return input[:cnt], true + } + cnt += size + } + return input, true +} + // End ends the span. This method does nothing if the span is already ended or // is not being recorded. // @@ -334,14 +410,13 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) { } s.mu.Unlock() - if sps, ok := s.tracer.provider.spanProcessors.Load().(spanProcessorStates); ok { - if len(sps) == 0 { - return - } - snap := s.snapshot() - for _, sp := range sps { - sp.sp.OnEnd(snap) - } + sps := s.tracer.provider.spanProcessors.Load().(spanProcessorStates) + if len(sps) == 0 { + return + } + snap := s.snapshot() + for _, sp := range sps { + sp.sp.OnEnd(snap) } } @@ -396,22 +471,23 @@ func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) { func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) { c := trace.NewEventConfig(o...) + e := Event{Name: name, Attributes: c.Attributes(), Time: c.Timestamp()} - // Discard over limited attributes - attributes := c.Attributes() - var discarded int - if len(attributes) > s.tracer.provider.spanLimits.AttributePerEventCountLimit { - discarded = len(attributes) - s.tracer.provider.spanLimits.AttributePerEventCountLimit - attributes = attributes[:s.tracer.provider.spanLimits.AttributePerEventCountLimit] + // Discard attributes over limit. + limit := s.tracer.provider.spanLimits.AttributePerEventCountLimit + if limit == 0 { + // Drop all attributes. + e.DroppedAttributeCount = len(e.Attributes) + e.Attributes = nil + } else if limit > 0 && len(e.Attributes) > limit { + // Drop over capacity. + e.DroppedAttributeCount = len(e.Attributes) - limit + e.Attributes = e.Attributes[:limit] } + s.mu.Lock() - defer s.mu.Unlock() - s.events.add(Event{ - Name: name, - Attributes: attributes, - DroppedAttributeCount: discarded, - Time: c.Timestamp(), - }) + s.events.add(e) + s.mu.Unlock() } // SetName sets the name of this span. If this span is not being recorded than @@ -531,12 +607,20 @@ func (s *recordingSpan) Status() Status { return s.status } +// InstrumentationScope returns the instrumentation.Scope associated with +// the Tracer that created this span. +func (s *recordingSpan) InstrumentationScope() instrumentation.Scope { + s.mu.Lock() + defer s.mu.Unlock() + return s.tracer.instrumentationScope +} + // InstrumentationLibrary returns the instrumentation.Library associated with // the Tracer that created this span. 
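For consumers of ReadOnlySpan (exporters, span processors), the scope rename is mechanical; a minimal sketch of the accessor swap, not part of the diff:

package main

import sdktrace "go.opentelemetry.io/otel/sdk/trace"

// scopeName illustrates the migration: InstrumentationScope replaces the
// now-deprecated InstrumentationLibrary and returns the same data.
func scopeName(ro sdktrace.ReadOnlySpan) string {
	return ro.InstrumentationScope().Name // was: ro.InstrumentationLibrary().Name
}

func main() { _ = scopeName }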
func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { s.mu.Lock() defer s.mu.Unlock() - return s.tracer.instrumentationLibrary + return s.tracer.instrumentationScope } // Resource returns the Resource associated with the Tracer that created this @@ -551,18 +635,23 @@ func (s *recordingSpan) addLink(link trace.Link) { if !s.IsRecording() || !link.SpanContext.IsValid() { return } - s.mu.Lock() - defer s.mu.Unlock() - var droppedAttributeCount int + l := Link{SpanContext: link.SpanContext, Attributes: link.Attributes} - // Discard over limited attributes - if len(link.Attributes) > s.tracer.provider.spanLimits.AttributePerLinkCountLimit { - droppedAttributeCount = len(link.Attributes) - s.tracer.provider.spanLimits.AttributePerLinkCountLimit - link.Attributes = link.Attributes[:s.tracer.provider.spanLimits.AttributePerLinkCountLimit] + // Discard attributes over limit. + limit := s.tracer.provider.spanLimits.AttributePerLinkCountLimit + if limit == 0 { + // Drop all attributes. + l.DroppedAttributeCount = len(l.Attributes) + l.Attributes = nil + } else if limit > 0 && len(l.Attributes) > limit { + l.DroppedAttributeCount = len(l.Attributes) - limit + l.Attributes = l.Attributes[:limit] } - s.links.add(Link{link.SpanContext, link.Attributes, droppedAttributeCount}) + s.mu.Lock() + s.links.add(l) + s.mu.Unlock() } // DroppedAttributes returns the number of attributes dropped by the span @@ -610,7 +699,7 @@ func (s *recordingSpan) snapshot() ReadOnlySpan { defer s.mu.Unlock() sd.endTime = s.endTime - sd.instrumentationLibrary = s.tracer.instrumentationLibrary + sd.instrumentationScope = s.tracer.instrumentationScope sd.name = s.name sd.parent = s.parent sd.resource = s.tracer.provider.resource diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go new file mode 100644 index 0000000000000..aa4d4221db36d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go @@ -0,0 +1,125 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import "go.opentelemetry.io/otel/sdk/internal/env" + +const ( + // DefaultAttributeValueLengthLimit is the default maximum allowed + // attribute value length, unlimited. + DefaultAttributeValueLengthLimit = -1 + + // DefaultAttributeCountLimit is the default maximum number of attributes + // a span can have. + DefaultAttributeCountLimit = 128 + + // DefaultEventCountLimit is the default maximum number of events a span + // can have. + DefaultEventCountLimit = 128 + + // DefaultLinkCountLimit is the default maximum number of links a span can + // have. + DefaultLinkCountLimit = 128 + + // DefaultAttributePerEventCountLimit is the default maximum number of + // attributes a span event can have. + DefaultAttributePerEventCountLimit = 128 + + // DefaultAttributePerLinkCountLimit is the default maximum number of + // attributes a span link can have. 
+ DefaultAttributePerLinkCountLimit = 128 +) + +// SpanLimits represents the limits of a span. +type SpanLimits struct { + // AttributeValueLengthLimit is the maximum allowed attribute value length. + // + // This limit only applies to string and string slice attribute values. + // Any string longer than this value will be truncated to this length. + // + // Setting this to a negative value means no limit is applied. + AttributeValueLengthLimit int + + // AttributeCountLimit is the maximum allowed span attribute count. Any + // attribute added to a span once this limit is reached will be dropped. + // + // Setting this to zero means no attributes will be recorded. + // + // Setting this to a negative value means no limit is applied. + AttributeCountLimit int + + // EventCountLimit is the maximum allowed span event count. Any event + // added to a span once this limit is reached means it will be added but + // the oldest event will be dropped. + // + // Setting this to zero means no events we be recorded. + // + // Setting this to a negative value means no limit is applied. + EventCountLimit int + + // LinkCountLimit is the maximum allowed span link count. Any link added + // to a span once this limit is reached means it will be added but the + // oldest link will be dropped. + // + // Setting this to zero means no links we be recorded. + // + // Setting this to a negative value means no limit is applied. + LinkCountLimit int + + // AttributePerEventCountLimit is the maximum number of attributes allowed + // per span event. Any attribute added after this limit reached will be + // dropped. + // + // Setting this to zero means no attributes will be recorded for events. + // + // Setting this to a negative value means no limit is applied. + AttributePerEventCountLimit int + + // AttributePerLinkCountLimit is the maximum number of attributes allowed + // per span link. Any attribute added after this limit reached will be + // dropped. + // + // Setting this to zero means no attributes will be recorded for links. + // + // Setting this to a negative value means no limit is applied. + AttributePerLinkCountLimit int +} + +// NewSpanLimits returns a SpanLimits with all limits set to the value their +// corresponding environment variable holds, or the default if unset. 
+// +// • AttributeValueLengthLimit: OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT +// (default: unlimited) +// +// • AttributeCountLimit: OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT (default: 128) +// +// • EventCountLimit: OTEL_SPAN_EVENT_COUNT_LIMIT (default: 128) +// +// • AttributePerEventCountLimit: OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT (default: +// 128) +// +// • LinkCountLimit: OTEL_SPAN_LINK_COUNT_LIMIT (default: 128) +// +// • AttributePerLinkCountLimit: OTEL_LINK_ATTRIBUTE_COUNT_LIMIT (default: 128) +func NewSpanLimits() SpanLimits { + return SpanLimits{ + AttributeValueLengthLimit: env.SpanAttributeValueLength(DefaultAttributeValueLengthLimit), + AttributeCountLimit: env.SpanAttributeCount(DefaultAttributeCountLimit), + EventCountLimit: env.SpanEventCount(DefaultEventCountLimit), + LinkCountLimit: env.SpanLinkCount(DefaultLinkCountLimit), + AttributePerEventCountLimit: env.SpanEventAttributeCount(DefaultAttributePerEventCountLimit), + AttributePerLinkCountLimit: env.SpanLinkAttributeCount(DefaultAttributePerLinkCountLimit), + } +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go index b649a2ff049f5..e6ae19352195b 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go @@ -64,4 +64,9 @@ type spanProcessorState struct { sp SpanProcessor state *sync.Once } + +func newSpanProcessorState(sp SpanProcessor) *spanProcessorState { + return &spanProcessorState{sp: sp, state: &sync.Once{}} +} + type spanProcessorStates []*spanProcessorState diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go index 5b8ab43be3dce..f17d924b89e37 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go @@ -23,8 +23,8 @@ import ( ) type tracer struct { - provider *TracerProvider - instrumentationLibrary instrumentation.Library + provider *TracerProvider + instrumentationScope instrumentation.Scope } var _ trace.Tracer = &tracer{} @@ -37,6 +37,11 @@ var _ trace.Tracer = &tracer{} func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanStartOption) (context.Context, trace.Span) { config := trace.NewSpanStartConfig(options...) + if ctx == nil { + // Prevent trace.ContextWithSpan from panicking. + ctx = context.Background() + } + // For local spans created by this SDK, track child span count. if p := trace.SpanFromContext(ctx); p != nil { if sdkSpan, ok := p.(*recordingSpan); ok { @@ -46,7 +51,7 @@ func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanS s := tr.newSpan(ctx, name, &config) if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() { - sps, _ := tr.provider.spanProcessors.Load().(spanProcessorStates) + sps := tr.provider.spanProcessors.Load().(spanProcessorStates) for _, sp := range sps { sp.sp.OnStart(ctx, rw) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go index dcf32c148dd63..06673a1c04966 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/recorder.go @@ -32,6 +32,7 @@ type SpanRecorder struct { var _ sdktrace.SpanProcessor = (*SpanRecorder)(nil) +// NewSpanRecorder returns a new initialized SpanRecorder. 
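Putting the limits machinery together: a sketch (not part of the diff) that starts from NewSpanLimits, tightens the new AttributeValueLengthLimit via WithRawSpanLimits, and observes the truncation with the tracetest SpanRecorder; attribute names and the limit value are arbitrary.

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/sdk/trace/tracetest"
)

func main() {
	// Environment-aware defaults, then tighten one limit.
	limits := sdktrace.NewSpanLimits()
	limits.AttributeValueLengthLimit = 5 // string values longer than 5 bytes are truncated

	sr := tracetest.NewSpanRecorder()
	tp := sdktrace.NewTracerProvider(
		sdktrace.WithRawSpanLimits(limits), // use the limits exactly as given
		sdktrace.WithSpanProcessor(sr),
	)

	_, span := tp.Tracer("limits-demo").Start(context.Background(), "op")
	span.SetAttributes(attribute.String("greeting", "hello world"))
	span.End()

	got := sr.Ended()[0].Attributes()[0]
	fmt.Println(got.Value.AsString()) // "hello"
	_ = tp.Shutdown(context.Background())
}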
func NewSpanRecorder() *SpanRecorder { return new(SpanRecorder) } diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go index ece4633c52593..bfe73de9c4156 100644 --- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracetest/span.go @@ -24,6 +24,7 @@ import ( "go.opentelemetry.io/otel/trace" ) +// SpanStubs is a slice of SpanStub use for testing an SDK. type SpanStubs []SpanStub // SpanStubsFromReadOnlySpans returns SpanStubs populated from ro. @@ -95,29 +96,29 @@ func SpanStubFromReadOnlySpan(ro tracesdk.ReadOnlySpan) SpanStub { DroppedLinks: ro.DroppedLinks(), ChildSpanCount: ro.ChildSpanCount(), Resource: ro.Resource(), - InstrumentationLibrary: ro.InstrumentationLibrary(), + InstrumentationLibrary: ro.InstrumentationScope(), } } // Snapshot returns a read-only copy of the SpanStub. func (s SpanStub) Snapshot() tracesdk.ReadOnlySpan { return spanSnapshot{ - name: s.Name, - spanContext: s.SpanContext, - parent: s.Parent, - spanKind: s.SpanKind, - startTime: s.StartTime, - endTime: s.EndTime, - attributes: s.Attributes, - events: s.Events, - links: s.Links, - status: s.Status, - droppedAttributes: s.DroppedAttributes, - droppedEvents: s.DroppedEvents, - droppedLinks: s.DroppedLinks, - childSpanCount: s.ChildSpanCount, - resource: s.Resource, - instrumentationLibrary: s.InstrumentationLibrary, + name: s.Name, + spanContext: s.SpanContext, + parent: s.Parent, + spanKind: s.SpanKind, + startTime: s.StartTime, + endTime: s.EndTime, + attributes: s.Attributes, + events: s.Events, + links: s.Links, + status: s.Status, + droppedAttributes: s.DroppedAttributes, + droppedEvents: s.DroppedEvents, + droppedLinks: s.DroppedLinks, + childSpanCount: s.ChildSpanCount, + resource: s.Resource, + instrumentationScope: s.InstrumentationLibrary, } } @@ -125,22 +126,22 @@ type spanSnapshot struct { // Embed the interface to implement the private method. 
tracesdk.ReadOnlySpan - name string - spanContext trace.SpanContext - parent trace.SpanContext - spanKind trace.SpanKind - startTime time.Time - endTime time.Time - attributes []attribute.KeyValue - events []tracesdk.Event - links []tracesdk.Link - status tracesdk.Status - droppedAttributes int - droppedEvents int - droppedLinks int - childSpanCount int - resource *resource.Resource - instrumentationLibrary instrumentation.Library + name string + spanContext trace.SpanContext + parent trace.SpanContext + spanKind trace.SpanKind + startTime time.Time + endTime time.Time + attributes []attribute.KeyValue + events []tracesdk.Event + links []tracesdk.Link + status tracesdk.Status + droppedAttributes int + droppedEvents int + droppedLinks int + childSpanCount int + resource *resource.Resource + instrumentationScope instrumentation.Scope } func (s spanSnapshot) Name() string { return s.name } @@ -158,6 +159,9 @@ func (s spanSnapshot) DroppedLinks() int { return s.droppedLinks func (s spanSnapshot) DroppedEvents() int { return s.droppedEvents } func (s spanSnapshot) ChildSpanCount() int { return s.childSpanCount } func (s spanSnapshot) Resource() *resource.Resource { return s.resource } +func (s spanSnapshot) InstrumentationScope() instrumentation.Scope { + return s.instrumentationScope +} func (s spanSnapshot) InstrumentationLibrary() instrumentation.Library { - return s.instrumentationLibrary + return s.instrumentationScope } diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go new file mode 100644 index 0000000000000..b580eedeff7fd --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/internal/http.go @@ -0,0 +1,336 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opentelemetry.io/otel/semconv/internal" + +import ( + "fmt" + "net" + "net/http" + "strconv" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" +) + +// SemanticConventions are the semantic convention values defined for a +// version of the OpenTelemetry specification. 
+type SemanticConventions struct { + EnduserIDKey attribute.Key + HTTPClientIPKey attribute.Key + HTTPFlavorKey attribute.Key + HTTPHostKey attribute.Key + HTTPMethodKey attribute.Key + HTTPRequestContentLengthKey attribute.Key + HTTPRouteKey attribute.Key + HTTPSchemeHTTP attribute.KeyValue + HTTPSchemeHTTPS attribute.KeyValue + HTTPServerNameKey attribute.Key + HTTPStatusCodeKey attribute.Key + HTTPTargetKey attribute.Key + HTTPURLKey attribute.Key + HTTPUserAgentKey attribute.Key + NetHostIPKey attribute.Key + NetHostNameKey attribute.Key + NetHostPortKey attribute.Key + NetPeerIPKey attribute.Key + NetPeerNameKey attribute.Key + NetPeerPortKey attribute.Key + NetTransportIP attribute.KeyValue + NetTransportOther attribute.KeyValue + NetTransportTCP attribute.KeyValue + NetTransportUDP attribute.KeyValue + NetTransportUnix attribute.KeyValue +} + +// NetAttributesFromHTTPRequest generates attributes of the net +// namespace as specified by the OpenTelemetry specification for a +// span. The network parameter is a string that net.Dial function +// from standard library can understand. +func (sc *SemanticConventions) NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} + + switch network { + case "tcp", "tcp4", "tcp6": + attrs = append(attrs, sc.NetTransportTCP) + case "udp", "udp4", "udp6": + attrs = append(attrs, sc.NetTransportUDP) + case "ip", "ip4", "ip6": + attrs = append(attrs, sc.NetTransportIP) + case "unix", "unixgram", "unixpacket": + attrs = append(attrs, sc.NetTransportUnix) + default: + attrs = append(attrs, sc.NetTransportOther) + } + + peerIP, peerName, peerPort := hostIPNamePort(request.RemoteAddr) + if peerIP != "" { + attrs = append(attrs, sc.NetPeerIPKey.String(peerIP)) + } + if peerName != "" { + attrs = append(attrs, sc.NetPeerNameKey.String(peerName)) + } + if peerPort != 0 { + attrs = append(attrs, sc.NetPeerPortKey.Int(peerPort)) + } + + hostIP, hostName, hostPort := "", "", 0 + for _, someHost := range []string{request.Host, request.Header.Get("Host"), request.URL.Host} { + hostIP, hostName, hostPort = hostIPNamePort(someHost) + if hostIP != "" || hostName != "" || hostPort != 0 { + break + } + } + if hostIP != "" { + attrs = append(attrs, sc.NetHostIPKey.String(hostIP)) + } + if hostName != "" { + attrs = append(attrs, sc.NetHostNameKey.String(hostName)) + } + if hostPort != 0 { + attrs = append(attrs, sc.NetHostPortKey.Int(hostPort)) + } + + return attrs +} + +// hostIPNamePort extracts the IP address, name and (optional) port from hostWithPort. +// It handles both IPv4 and IPv6 addresses. If the host portion is not recognized +// as a valid IPv4 or IPv6 address, the `ip` result will be empty and the +// host portion will instead be returned in `name`. +func hostIPNamePort(hostWithPort string) (ip string, name string, port int) { + var ( + hostPart, portPart string + parsedPort uint64 + err error + ) + if hostPart, portPart, err = net.SplitHostPort(hostWithPort); err != nil { + hostPart, portPart = hostWithPort, "" + } + if parsedIP := net.ParseIP(hostPart); parsedIP != nil { + ip = parsedIP.String() + } else { + name = hostPart + } + if parsedPort, err = strconv.ParseUint(portPart, 10, 16); err == nil { + port = int(parsedPort) + } + return +} + +// EndUserAttributesFromHTTPRequest generates attributes of the +// enduser namespace as specified by the OpenTelemetry specification +// for a span. 
+func (sc *SemanticConventions) EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + if username, _, ok := request.BasicAuth(); ok { + return []attribute.KeyValue{sc.EnduserIDKey.String(username)} + } + return nil +} + +// HTTPClientAttributesFromHTTPRequest generates attributes of the +// http namespace as specified by the OpenTelemetry specification for +// a span on the client side. +func (sc *SemanticConventions) HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} + + // remove any username/password info that may be in the URL + // before adding it to the attributes + userinfo := request.URL.User + request.URL.User = nil + + attrs = append(attrs, sc.HTTPURLKey.String(request.URL.String())) + + // restore any username/password info that was removed + request.URL.User = userinfo + + return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...) +} + +func (sc *SemanticConventions) httpCommonAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} + if ua := request.UserAgent(); ua != "" { + attrs = append(attrs, sc.HTTPUserAgentKey.String(ua)) + } + if request.ContentLength > 0 { + attrs = append(attrs, sc.HTTPRequestContentLengthKey.Int64(request.ContentLength)) + } + + return append(attrs, sc.httpBasicAttributesFromHTTPRequest(request)...) +} + +func (sc *SemanticConventions) httpBasicAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + // as these attributes are used by HTTPServerMetricAttributesFromHTTPRequest, they should be low-cardinality + attrs := []attribute.KeyValue{} + + if request.TLS != nil { + attrs = append(attrs, sc.HTTPSchemeHTTPS) + } else { + attrs = append(attrs, sc.HTTPSchemeHTTP) + } + + if request.Host != "" { + attrs = append(attrs, sc.HTTPHostKey.String(request.Host)) + } else if request.URL != nil && request.URL.Host != "" { + attrs = append(attrs, sc.HTTPHostKey.String(request.URL.Host)) + } + + flavor := "" + if request.ProtoMajor == 1 { + flavor = fmt.Sprintf("1.%d", request.ProtoMinor) + } else if request.ProtoMajor == 2 { + flavor = "2" + } + if flavor != "" { + attrs = append(attrs, sc.HTTPFlavorKey.String(flavor)) + } + + if request.Method != "" { + attrs = append(attrs, sc.HTTPMethodKey.String(request.Method)) + } else { + attrs = append(attrs, sc.HTTPMethodKey.String(http.MethodGet)) + } + + return attrs +} + +// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes +// to be used with server-side HTTP metrics. +func (sc *SemanticConventions) HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} + if serverName != "" { + attrs = append(attrs, sc.HTTPServerNameKey.String(serverName)) + } + return append(attrs, sc.httpBasicAttributesFromHTTPRequest(request)...) +} + +// HTTPServerAttributesFromHTTPRequest generates attributes of the +// http namespace as specified by the OpenTelemetry specification for +// a span on the server side. Currently, only basic authentication is +// supported. 
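Note: the client-side URL handling above temporarily clears req.URL.User so credentials never end up in the http.url attribute. A small sketch of that pattern in isolation (illustrative only; the request URL is invented):

package main

import (
	"fmt"
	"net/http"
)

// urlWithoutUserinfo renders the request URL with any username/password
// removed and then restores the original value, the same pattern the
// vendored HTTPClientAttributesFromHTTPRequest uses before recording http.url.
func urlWithoutUserinfo(req *http.Request) string {
	userinfo := req.URL.User
	req.URL.User = nil
	u := req.URL.String()
	req.URL.User = userinfo
	return u
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://user:secret@example.com/path", nil)
	fmt.Println(urlWithoutUserinfo(req)) // https://example.com/path
	fmt.Println(req.URL.User != nil)     // true: the original URL is untouched
}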
+func (sc *SemanticConventions) HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{ + sc.HTTPTargetKey.String(request.RequestURI), + } + + if serverName != "" { + attrs = append(attrs, sc.HTTPServerNameKey.String(serverName)) + } + if route != "" { + attrs = append(attrs, sc.HTTPRouteKey.String(route)) + } + if values, ok := request.Header["X-Forwarded-For"]; ok && len(values) > 0 { + if addresses := strings.SplitN(values[0], ",", 2); len(addresses) > 0 { + attrs = append(attrs, sc.HTTPClientIPKey.String(addresses[0])) + } + } + + return append(attrs, sc.httpCommonAttributesFromHTTPRequest(request)...) +} + +// HTTPAttributesFromHTTPStatusCode generates attributes of the http +// namespace as specified by the OpenTelemetry specification for a +// span. +func (sc *SemanticConventions) HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { + attrs := []attribute.KeyValue{ + sc.HTTPStatusCodeKey.Int(code), + } + return attrs +} + +type codeRange struct { + fromInclusive int + toInclusive int +} + +func (r codeRange) contains(code int) bool { + return r.fromInclusive <= code && code <= r.toInclusive +} + +var validRangesPerCategory = map[int][]codeRange{ + 1: { + {http.StatusContinue, http.StatusEarlyHints}, + }, + 2: { + {http.StatusOK, http.StatusAlreadyReported}, + {http.StatusIMUsed, http.StatusIMUsed}, + }, + 3: { + {http.StatusMultipleChoices, http.StatusUseProxy}, + {http.StatusTemporaryRedirect, http.StatusPermanentRedirect}, + }, + 4: { + {http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful… + {http.StatusMisdirectedRequest, http.StatusUpgradeRequired}, + {http.StatusPreconditionRequired, http.StatusTooManyRequests}, + {http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge}, + {http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons}, + }, + 5: { + {http.StatusInternalServerError, http.StatusLoopDetected}, + {http.StatusNotExtended, http.StatusNetworkAuthenticationRequired}, + }, +} + +// SpanStatusFromHTTPStatusCode generates a status code and a message +// as specified by the OpenTelemetry specification for a span. +func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { + spanCode, valid := validateHTTPStatusCode(code) + if !valid { + return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) + } + return spanCode, "" +} + +// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message +// as specified by the OpenTelemetry specification for a span. +// Exclude 4xx for SERVER to set the appropriate status. +func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { + spanCode, valid := validateHTTPStatusCode(code) + if !valid { + return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) + } + category := code / 100 + if spanKind == trace.SpanKindServer && category == 4 { + return codes.Unset, "" + } + return spanCode, "" +} + +// validateHTTPStatusCode validates the HTTP status code and returns +// corresponding span status code. If the `code` is not a valid HTTP status +// code, returns span status Error and false. 
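Note: these status helpers are re-exported by the versioned package added further down in this diff (semconv/v1.12.0). A usage sketch against that public API, showing the 4xx-on-server exclusion:

package main

import (
	"fmt"

	semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	// A 404 observed on the server side maps to Unset: 4xx is excluded
	// for SpanKindServer by SpanStatusFromHTTPStatusCodeAndSpanKind.
	code, msg := semconv.SpanStatusFromHTTPStatusCodeAndSpanKind(404, trace.SpanKindServer)
	fmt.Println(code, msg)

	// The same 404 observed by a client is an error.
	code, msg = semconv.SpanStatusFromHTTPStatusCodeAndSpanKind(404, trace.SpanKindClient)
	fmt.Println(code, msg)
}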
+func validateHTTPStatusCode(code int) (codes.Code, bool) { + category := code / 100 + ranges, ok := validRangesPerCategory[category] + if !ok { + return codes.Error, false + } + ok = false + for _, crange := range ranges { + ok = crange.contains(code) + if ok { + break + } + } + if !ok { + return codes.Error, false + } + if category > 0 && category < 4 { + return codes.Unset, true + } + return codes.Error, true +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go new file mode 100644 index 0000000000000..c7c47fa881518 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/http.go @@ -0,0 +1,405 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opentelemetry.io/otel/semconv/internal/v2" + +import ( + "fmt" + "net/http" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" +) + +// HTTPConv are the HTTP semantic convention attributes defined for a version +// of the OpenTelemetry specification. +type HTTPConv struct { + NetConv *NetConv + + EnduserIDKey attribute.Key + HTTPClientIPKey attribute.Key + HTTPFlavorKey attribute.Key + HTTPMethodKey attribute.Key + HTTPRequestContentLengthKey attribute.Key + HTTPResponseContentLengthKey attribute.Key + HTTPRouteKey attribute.Key + HTTPSchemeHTTP attribute.KeyValue + HTTPSchemeHTTPS attribute.KeyValue + HTTPStatusCodeKey attribute.Key + HTTPTargetKey attribute.Key + HTTPURLKey attribute.Key + HTTPUserAgentKey attribute.Key +} + +// ClientResponse returns attributes for an HTTP response received by a client +// from a server. The following attributes are returned if the related values +// are defined in resp: "http.status.code", "http.response_content_length". +// +// This does not add all OpenTelemetry required attributes for an HTTP event, +// it assumes ClientRequest was used to create the span with a complete set of +// attributes. If a complete set of attributes can be generated using the +// request contained in resp. For example: +// +// append(ClientResponse(resp), ClientRequest(resp.Request)...) +func (c *HTTPConv) ClientResponse(resp *http.Response) []attribute.KeyValue { + var n int + if resp.StatusCode > 0 { + n++ + } + if resp.ContentLength > 0 { + n++ + } + + attrs := make([]attribute.KeyValue, 0, n) + if resp.StatusCode > 0 { + attrs = append(attrs, c.HTTPStatusCodeKey.Int(resp.StatusCode)) + } + if resp.ContentLength > 0 { + attrs = append(attrs, c.HTTPResponseContentLengthKey.Int(int(resp.ContentLength))) + } + return attrs +} + +// ClientRequest returns attributes for an HTTP request made by a client. The +// following attributes are always returned: "http.url", "http.flavor", +// "http.method", "net.peer.name". The following attributes are returned if the +// related values are defined in req: "net.peer.port", "http.user_agent", +// "http.request_content_length", "enduser.id". 
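Note: HTTPConv lives in an internal package and is presumably consumed by later versioned semconv packages, so it is not directly importable. The closest runnable equivalent of the ClientRequest/ClientResponse pairing described above, using only the public v1.12.0 API added in this diff (server and URL are invented test fixtures):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
)

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusTeapot)
	}))
	defer srv.Close()

	req, _ := http.NewRequest(http.MethodGet, srv.URL, nil)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Request attributes (http.url, http.method, ...) plus the status code,
	// roughly what the v2 ClientRequest + ClientResponse helpers produce.
	attrs := semconv.HTTPClientAttributesFromHTTPRequest(req)
	attrs = append(attrs, semconv.HTTPAttributesFromHTTPStatusCode(resp.StatusCode)...)
	for _, kv := range attrs {
		fmt.Println(kv.Key, kv.Value.Emit())
	}
}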
+func (c *HTTPConv) ClientRequest(req *http.Request) []attribute.KeyValue { + n := 3 // URL, peer name, proto, and method. + var h string + if req.URL != nil { + h = req.URL.Host + } + peer, p := firstHostPort(h, req.Header.Get("Host")) + port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", p) + if port > 0 { + n++ + } + useragent := req.UserAgent() + if useragent != "" { + n++ + } + if req.ContentLength > 0 { + n++ + } + userID, _, hasUserID := req.BasicAuth() + if hasUserID { + n++ + } + attrs := make([]attribute.KeyValue, 0, n) + + attrs = append(attrs, c.method(req.Method)) + attrs = append(attrs, c.proto(req.Proto)) + + var u string + if req.URL != nil { + // Remove any username/password info that may be in the URL. + userinfo := req.URL.User + req.URL.User = nil + u = req.URL.String() + // Restore any username/password info that was removed. + req.URL.User = userinfo + } + attrs = append(attrs, c.HTTPURLKey.String(u)) + + attrs = append(attrs, c.NetConv.PeerName(peer)) + if port > 0 { + attrs = append(attrs, c.NetConv.PeerPort(port)) + } + + if useragent != "" { + attrs = append(attrs, c.HTTPUserAgentKey.String(useragent)) + } + + if l := req.ContentLength; l > 0 { + attrs = append(attrs, c.HTTPRequestContentLengthKey.Int64(l)) + } + + if hasUserID { + attrs = append(attrs, c.EnduserIDKey.String(userID)) + } + + return attrs +} + +// ServerRequest returns attributes for an HTTP request received by a server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. +// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +// +// The following attributes are always returned: "http.method", "http.scheme", +// "http.flavor", "http.target", "net.host.name". The following attributes are +// returned if they related values are defined in req: "net.host.port", +// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", +// "http.client_ip". +func (c *HTTPConv) ServerRequest(server string, req *http.Request) []attribute.KeyValue { + n := 5 // Method, scheme, target, proto, and host name. + var host string + var p int + if server == "" { + host, p = splitHostPort(req.Host) + } else { + // Prioritize the primary server name. 
+ host, p = splitHostPort(server) + if p < 0 { + _, p = splitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + n++ + } + peer, peerPort := splitHostPort(req.RemoteAddr) + if peer != "" { + n++ + if peerPort > 0 { + n++ + } + } + useragent := req.UserAgent() + if useragent != "" { + n++ + } + userID, _, hasUserID := req.BasicAuth() + if hasUserID { + n++ + } + clientIP := serverClientIP(req.Header.Get("X-Forwarded-For")) + if clientIP != "" { + n++ + } + attrs := make([]attribute.KeyValue, 0, n) + + attrs = append(attrs, c.method(req.Method)) + attrs = append(attrs, c.scheme(req.TLS != nil)) + attrs = append(attrs, c.proto(req.Proto)) + attrs = append(attrs, c.NetConv.HostName(host)) + + if req.URL != nil { + attrs = append(attrs, c.HTTPTargetKey.String(req.URL.RequestURI())) + } else { + // This should never occur if the request was generated by the net/http + // package. Fail gracefully, if it does though. + attrs = append(attrs, c.HTTPTargetKey.String(req.RequestURI)) + } + + if hostPort > 0 { + attrs = append(attrs, c.NetConv.HostPort(hostPort)) + } + + if peer != "" { + // The Go HTTP server sets RemoteAddr to "IP:port", this will not be a + // file-path that would be interpreted with a sock family. + attrs = append(attrs, c.NetConv.SockPeerAddr(peer)) + if peerPort > 0 { + attrs = append(attrs, c.NetConv.SockPeerPort(peerPort)) + } + } + + if useragent != "" { + attrs = append(attrs, c.HTTPUserAgentKey.String(useragent)) + } + + if hasUserID { + attrs = append(attrs, c.EnduserIDKey.String(userID)) + } + + if clientIP != "" { + attrs = append(attrs, c.HTTPClientIPKey.String(clientIP)) + } + + return attrs +} + +func (c *HTTPConv) method(method string) attribute.KeyValue { + if method == "" { + return c.HTTPMethodKey.String(http.MethodGet) + } + return c.HTTPMethodKey.String(method) +} + +func (c *HTTPConv) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return c.HTTPSchemeHTTPS + } + return c.HTTPSchemeHTTP +} + +func (c *HTTPConv) proto(proto string) attribute.KeyValue { + switch proto { + case "HTTP/1.0": + return c.HTTPFlavorKey.String("1.0") + case "HTTP/1.1": + return c.HTTPFlavorKey.String("1.1") + case "HTTP/2": + return c.HTTPFlavorKey.String("2.0") + case "HTTP/3": + return c.HTTPFlavorKey.String("3.0") + default: + return c.HTTPFlavorKey.String(proto) + } +} + +func serverClientIP(xForwardedFor string) string { + if idx := strings.Index(xForwardedFor, ","); idx >= 0 { + xForwardedFor = xForwardedFor[:idx] + } + return xForwardedFor +} + +func requiredHTTPPort(https bool, port int) int { // nolint:revive + if https { + if port > 0 && port != 443 { + return port + } + } else { + if port > 0 && port != 80 { + return port + } + } + return -1 +} + +// Return the request host and port from the first non-empty source. +func firstHostPort(source ...string) (host string, port int) { + for _, hostport := range source { + host, port = splitHostPort(hostport) + if host != "" || port > 0 { + break + } + } + return +} + +// RequestHeader returns the contents of h as OpenTelemetry attributes. +func (c *HTTPConv) RequestHeader(h http.Header) []attribute.KeyValue { + return c.header("http.request.header", h) +} + +// ResponseHeader returns the contents of h as OpenTelemetry attributes. 
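Note: the header helpers normalize header names into attribute keys (lowercase, dashes to underscores, fixed prefix). A standalone sketch of that naming rule with made-up header values:

package main

import (
	"fmt"
	"net/http"
	"strings"

	"go.opentelemetry.io/otel/attribute"
)

// headerAttrs mirrors the (*HTTPConv).header naming rule:
// "Content-Type: text/plain" becomes http.request.header.content_type=[text/plain].
func headerAttrs(prefix string, h http.Header) []attribute.KeyValue {
	attrs := make([]attribute.KeyValue, 0, len(h))
	for k, v := range h {
		key := attribute.Key(prefix + "." + strings.ReplaceAll(strings.ToLower(k), "-", "_"))
		attrs = append(attrs, key.StringSlice(v))
	}
	return attrs
}

func main() {
	h := http.Header{}
	h.Set("Content-Type", "text/plain")
	h.Add("X-Request-Id", "abc-123")
	for _, kv := range headerAttrs("http.request.header", h) {
		fmt.Println(kv.Key, kv.Value.Emit())
	}
}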
+func (c *HTTPConv) ResponseHeader(h http.Header) []attribute.KeyValue { + return c.header("http.response.header", h) +} + +func (c *HTTPConv) header(prefix string, h http.Header) []attribute.KeyValue { + key := func(k string) attribute.Key { + k = strings.ToLower(k) + k = strings.ReplaceAll(k, "-", "_") + k = fmt.Sprintf("%s.%s", prefix, k) + return attribute.Key(k) + } + + attrs := make([]attribute.KeyValue, 0, len(h)) + for k, v := range h { + attrs = append(attrs, key(k).StringSlice(v)) + } + return attrs +} + +// ClientStatus returns a span status code and message for an HTTP status code +// value received by a client. +func (c *HTTPConv) ClientStatus(code int) (codes.Code, string) { + stat, valid := validateHTTPStatusCode(code) + if !valid { + return stat, fmt.Sprintf("Invalid HTTP status code %d", code) + } + return stat, "" +} + +// ServerStatus returns a span status code and message for an HTTP status code +// value returned by a server. Status codes in the 400-499 range are not +// returned as errors. +func (c *HTTPConv) ServerStatus(code int) (codes.Code, string) { + stat, valid := validateHTTPStatusCode(code) + if !valid { + return stat, fmt.Sprintf("Invalid HTTP status code %d", code) + } + + if code/100 == 4 { + return codes.Unset, "" + } + return stat, "" +} + +type codeRange struct { + fromInclusive int + toInclusive int +} + +func (r codeRange) contains(code int) bool { + return r.fromInclusive <= code && code <= r.toInclusive +} + +var validRangesPerCategory = map[int][]codeRange{ + 1: { + {http.StatusContinue, http.StatusEarlyHints}, + }, + 2: { + {http.StatusOK, http.StatusAlreadyReported}, + {http.StatusIMUsed, http.StatusIMUsed}, + }, + 3: { + {http.StatusMultipleChoices, http.StatusUseProxy}, + {http.StatusTemporaryRedirect, http.StatusPermanentRedirect}, + }, + 4: { + {http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful… + {http.StatusMisdirectedRequest, http.StatusUpgradeRequired}, + {http.StatusPreconditionRequired, http.StatusTooManyRequests}, + {http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge}, + {http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons}, + }, + 5: { + {http.StatusInternalServerError, http.StatusLoopDetected}, + {http.StatusNotExtended, http.StatusNetworkAuthenticationRequired}, + }, +} + +// validateHTTPStatusCode validates the HTTP status code and returns +// corresponding span status code. If the `code` is not a valid HTTP status +// code, returns span status Error and false. +func validateHTTPStatusCode(code int) (codes.Code, bool) { + category := code / 100 + ranges, ok := validRangesPerCategory[category] + if !ok { + return codes.Error, false + } + ok = false + for _, crange := range ranges { + ok = crange.contains(code) + if ok { + break + } + } + if !ok { + return codes.Error, false + } + if category > 0 && category < 4 { + return codes.Unset, true + } + return codes.Error, true +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go new file mode 100644 index 0000000000000..4a711133a0269 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/internal/v2/net.go @@ -0,0 +1,324 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opentelemetry.io/otel/semconv/internal/v2" + +import ( + "net" + "strconv" + "strings" + + "go.opentelemetry.io/otel/attribute" +) + +// NetConv are the network semantic convention attributes defined for a version +// of the OpenTelemetry specification. +type NetConv struct { + NetHostNameKey attribute.Key + NetHostPortKey attribute.Key + NetPeerNameKey attribute.Key + NetPeerPortKey attribute.Key + NetSockFamilyKey attribute.Key + NetSockPeerAddrKey attribute.Key + NetSockPeerPortKey attribute.Key + NetSockHostAddrKey attribute.Key + NetSockHostPortKey attribute.Key + NetTransportOther attribute.KeyValue + NetTransportTCP attribute.KeyValue + NetTransportUDP attribute.KeyValue + NetTransportInProc attribute.KeyValue +} + +func (c *NetConv) Transport(network string) attribute.KeyValue { + switch network { + case "tcp", "tcp4", "tcp6": + return c.NetTransportTCP + case "udp", "udp4", "udp6": + return c.NetTransportUDP + case "unix", "unixgram", "unixpacket": + return c.NetTransportInProc + default: + // "ip:*", "ip4:*", and "ip6:*" all are considered other. + return c.NetTransportOther + } +} + +// Host returns attributes for a network host address. +func (c *NetConv) Host(address string) []attribute.KeyValue { + h, p := splitHostPort(address) + var n int + if h != "" { + n++ + if p > 0 { + n++ + } + } + + if n == 0 { + return nil + } + + attrs := make([]attribute.KeyValue, 0, n) + attrs = append(attrs, c.HostName(h)) + if p > 0 { + attrs = append(attrs, c.HostPort(int(p))) + } + return attrs +} + +// Server returns attributes for a network listener listening at address. See +// net.Listen for information about acceptable address values, address should +// be the same as the one used to create ln. If ln is nil, only network host +// attributes will be returned that describe address. Otherwise, the socket +// level information about ln will also be included. +func (c *NetConv) Server(address string, ln net.Listener) []attribute.KeyValue { + if ln == nil { + return c.Host(address) + } + + lAddr := ln.Addr() + if lAddr == nil { + return c.Host(address) + } + + hostName, hostPort := splitHostPort(address) + sockHostAddr, sockHostPort := splitHostPort(lAddr.String()) + network := lAddr.Network() + sockFamily := family(network, sockHostAddr) + + n := nonZeroStr(hostName, network, sockHostAddr, sockFamily) + n += positiveInt(hostPort, sockHostPort) + attr := make([]attribute.KeyValue, 0, n) + if hostName != "" { + attr = append(attr, c.HostName(hostName)) + if hostPort > 0 { + // Only if net.host.name is set should net.host.port be. + attr = append(attr, c.HostPort(hostPort)) + } + } + if network != "" { + attr = append(attr, c.Transport(network)) + } + if sockFamily != "" { + attr = append(attr, c.NetSockFamilyKey.String(sockFamily)) + } + if sockHostAddr != "" { + attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr)) + if sockHostPort > 0 { + // Only if net.sock.host.addr is set should net.sock.host.port be. 
+ attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) + } + } + return attr +} + +func (c *NetConv) HostName(name string) attribute.KeyValue { + return c.NetHostNameKey.String(name) +} + +func (c *NetConv) HostPort(port int) attribute.KeyValue { + return c.NetHostPortKey.Int(port) +} + +// Client returns attributes for a client network connection to address. See +// net.Dial for information about acceptable address values, address should be +// the same as the one used to create conn. If conn is nil, only network peer +// attributes will be returned that describe address. Otherwise, the socket +// level information about conn will also be included. +func (c *NetConv) Client(address string, conn net.Conn) []attribute.KeyValue { + if conn == nil { + return c.Peer(address) + } + + lAddr, rAddr := conn.LocalAddr(), conn.RemoteAddr() + + var network string + switch { + case lAddr != nil: + network = lAddr.Network() + case rAddr != nil: + network = rAddr.Network() + default: + return c.Peer(address) + } + + peerName, peerPort := splitHostPort(address) + var ( + sockFamily string + sockPeerAddr string + sockPeerPort int + sockHostAddr string + sockHostPort int + ) + + if lAddr != nil { + sockHostAddr, sockHostPort = splitHostPort(lAddr.String()) + } + + if rAddr != nil { + sockPeerAddr, sockPeerPort = splitHostPort(rAddr.String()) + } + + switch { + case sockHostAddr != "": + sockFamily = family(network, sockHostAddr) + case sockPeerAddr != "": + sockFamily = family(network, sockPeerAddr) + } + + n := nonZeroStr(peerName, network, sockPeerAddr, sockHostAddr, sockFamily) + n += positiveInt(peerPort, sockPeerPort, sockHostPort) + attr := make([]attribute.KeyValue, 0, n) + if peerName != "" { + attr = append(attr, c.PeerName(peerName)) + if peerPort > 0 { + // Only if net.peer.name is set should net.peer.port be. + attr = append(attr, c.PeerPort(peerPort)) + } + } + if network != "" { + attr = append(attr, c.Transport(network)) + } + if sockFamily != "" { + attr = append(attr, c.NetSockFamilyKey.String(sockFamily)) + } + if sockPeerAddr != "" { + attr = append(attr, c.NetSockPeerAddrKey.String(sockPeerAddr)) + if sockPeerPort > 0 { + // Only if net.sock.peer.addr is set should net.sock.peer.port be. + attr = append(attr, c.NetSockPeerPortKey.Int(sockPeerPort)) + } + } + if sockHostAddr != "" { + attr = append(attr, c.NetSockHostAddrKey.String(sockHostAddr)) + if sockHostPort > 0 { + // Only if net.sock.host.addr is set should net.sock.host.port be. + attr = append(attr, c.NetSockHostPortKey.Int(sockHostPort)) + } + } + return attr +} + +func family(network, address string) string { + switch network { + case "unix", "unixgram", "unixpacket": + return "unix" + default: + if ip := net.ParseIP(address); ip != nil { + if ip.To4() == nil { + return "inet6" + } + return "inet" + } + } + return "" +} + +func nonZeroStr(strs ...string) int { + var n int + for _, str := range strs { + if str != "" { + n++ + } + } + return n +} + +func positiveInt(ints ...int) int { + var n int + for _, i := range ints { + if i > 0 { + n++ + } + } + return n +} + +// Peer returns attributes for a network peer address. 
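Note: the family helper above classifies the socket address family from the network and address. A small sketch of the same decision logic (illustrative, not the vendored function; addresses are examples):

package main

import (
	"fmt"
	"net"
)

// sockFamily mirrors the decision in the vendored family helper:
// unix-ish networks are "unix", otherwise the address decides inet/inet6.
func sockFamily(network, address string) string {
	switch network {
	case "unix", "unixgram", "unixpacket":
		return "unix"
	}
	if ip := net.ParseIP(address); ip != nil {
		if ip.To4() == nil {
			return "inet6"
		}
		return "inet"
	}
	return ""
}

func main() {
	fmt.Println(sockFamily("tcp", "192.0.2.10"))     // inet
	fmt.Println(sockFamily("tcp", "2001:db8::1"))    // inet6
	fmt.Println(sockFamily("unix", "/run/app.sock")) // unix
}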
+func (c *NetConv) Peer(address string) []attribute.KeyValue { + h, p := splitHostPort(address) + var n int + if h != "" { + n++ + if p > 0 { + n++ + } + } + + if n == 0 { + return nil + } + + attrs := make([]attribute.KeyValue, 0, n) + attrs = append(attrs, c.PeerName(h)) + if p > 0 { + attrs = append(attrs, c.PeerPort(int(p))) + } + return attrs +} + +func (c *NetConv) PeerName(name string) attribute.KeyValue { + return c.NetPeerNameKey.String(name) +} + +func (c *NetConv) PeerPort(port int) attribute.KeyValue { + return c.NetPeerPortKey.Int(port) +} + +func (c *NetConv) SockPeerAddr(addr string) attribute.KeyValue { + return c.NetSockPeerAddrKey.String(addr) +} + +func (c *NetConv) SockPeerPort(port int) attribute.KeyValue { + return c.NetSockPeerPortKey.Int(port) +} + +// splitHostPort splits a network address hostport of the form "host", +// "host%zone", "[host]", "[host%zone], "host:port", "host%zone:port", +// "[host]:port", "[host%zone]:port", or ":port" into host or host%zone and +// port. +// +// An empty host is returned if it is not provided or unparsable. A negative +// port is returned if it is not provided or unparsable. +func splitHostPort(hostport string) (host string, port int) { + port = -1 + + if strings.HasPrefix(hostport, "[") { + addrEnd := strings.LastIndex(hostport, "]") + if addrEnd < 0 { + // Invalid hostport. + return + } + if i := strings.LastIndex(hostport[addrEnd:], ":"); i < 0 { + host = hostport[1:addrEnd] + return + } + } else { + if i := strings.LastIndex(hostport, ":"); i < 0 { + host = hostport + return + } + } + + host, pStr, err := net.SplitHostPort(hostport) + if err != nil { + return + } + + p, err := strconv.ParseUint(pStr, 10, 16) + if err != nil { + return + } + return host, int(p) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/number/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/doc.go similarity index 60% rename from vendor/go.opentelemetry.io/otel/metric/number/doc.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.12.0/doc.go index 0649ff875e70a..181fcc9c520e1 100644 --- a/vendor/go.opentelemetry.io/otel/metric/number/doc.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/doc.go @@ -12,12 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -/* -Package number provides a number abstraction for instruments that -either support int64 or float64 input values. - -This package is currently in a pre-GA phase. Backwards incompatible changes -may be introduced in subsequent minor version releases as we work to track the -evolving OpenTelemetry specification and user feedback. -*/ -package number // import "go.opentelemetry.io/otel/metric/number" +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the conventions +// as of the v1.12.0 version of the OpenTelemetry specification. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go new file mode 100644 index 0000000000000..d6892709437c0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/exception.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go new file mode 100644 index 0000000000000..4b4f3cbaf0b82 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/http.go @@ -0,0 +1,114 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" + +import ( + "net/http" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/semconv/internal" + "go.opentelemetry.io/otel/trace" +) + +// HTTP scheme attributes. +var ( + HTTPSchemeHTTP = HTTPSchemeKey.String("http") + HTTPSchemeHTTPS = HTTPSchemeKey.String("https") +) + +var sc = &internal.SemanticConventions{ + EnduserIDKey: EnduserIDKey, + HTTPClientIPKey: HTTPClientIPKey, + HTTPFlavorKey: HTTPFlavorKey, + HTTPHostKey: HTTPHostKey, + HTTPMethodKey: HTTPMethodKey, + HTTPRequestContentLengthKey: HTTPRequestContentLengthKey, + HTTPRouteKey: HTTPRouteKey, + HTTPSchemeHTTP: HTTPSchemeHTTP, + HTTPSchemeHTTPS: HTTPSchemeHTTPS, + HTTPServerNameKey: HTTPServerNameKey, + HTTPStatusCodeKey: HTTPStatusCodeKey, + HTTPTargetKey: HTTPTargetKey, + HTTPURLKey: HTTPURLKey, + HTTPUserAgentKey: HTTPUserAgentKey, + NetHostIPKey: NetHostIPKey, + NetHostNameKey: NetHostNameKey, + NetHostPortKey: NetHostPortKey, + NetPeerIPKey: NetPeerIPKey, + NetPeerNameKey: NetPeerNameKey, + NetPeerPortKey: NetPeerPortKey, + NetTransportIP: NetTransportIP, + NetTransportOther: NetTransportOther, + NetTransportTCP: NetTransportTCP, + NetTransportUDP: NetTransportUDP, + NetTransportUnix: NetTransportUnix, +} + +// NetAttributesFromHTTPRequest generates attributes of the net +// namespace as specified by the OpenTelemetry specification for a +// span. The network parameter is a string that net.Dial function +// from standard library can understand. +func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { + return sc.NetAttributesFromHTTPRequest(network, request) +} + +// EndUserAttributesFromHTTPRequest generates attributes of the +// enduser namespace as specified by the OpenTelemetry specification +// for a span. 
+func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + return sc.EndUserAttributesFromHTTPRequest(request) +} + +// HTTPClientAttributesFromHTTPRequest generates attributes of the +// http namespace as specified by the OpenTelemetry specification for +// a span on the client side. +func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + return sc.HTTPClientAttributesFromHTTPRequest(request) +} + +// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes +// to be used with server-side HTTP metrics. +func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { + return sc.HTTPServerMetricAttributesFromHTTPRequest(serverName, request) +} + +// HTTPServerAttributesFromHTTPRequest generates attributes of the +// http namespace as specified by the OpenTelemetry specification for +// a span on the server side. Currently, only basic authentication is +// supported. +func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { + return sc.HTTPServerAttributesFromHTTPRequest(serverName, route, request) +} + +// HTTPAttributesFromHTTPStatusCode generates attributes of the http +// namespace as specified by the OpenTelemetry specification for a +// span. +func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { + return sc.HTTPAttributesFromHTTPStatusCode(code) +} + +// SpanStatusFromHTTPStatusCode generates a status code and a message +// as specified by the OpenTelemetry specification for a span. +func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { + return internal.SpanStatusFromHTTPStatusCode(code) +} + +// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message +// as specified by the OpenTelemetry specification for a span. +// Exclude 4xx for SERVER to set the appropriate status. +func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { + return internal.SpanStatusFromHTTPStatusCodeAndSpanKind(code, spanKind) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go new file mode 100644 index 0000000000000..b2155676f45ee --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/resource.go @@ -0,0 +1,1042 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" + +import "go.opentelemetry.io/otel/attribute" + +// The web browser in which the application represented by the resource is running. The `browser.*` attributes MUST be used only for resources that represent applications running in a web browser (regardless of whether running on a mobile or desktop device). 
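Note: taken together, the v1.12.0 wrappers above are enough to annotate a server span by hand. A hedged sketch of a middleware doing so; the handler, tracer name, and listen address are made up, and the recorded status code is a stand-in, but the semconv calls are the ones added in this file:

package main

import (
	"net/http"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
	"go.opentelemetry.io/otel/trace"
)

func traced(next http.Handler) http.Handler {
	tracer := otel.Tracer("example/middleware") // hypothetical instrumentation name
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx, span := tracer.Start(r.Context(), r.Method+" "+r.URL.Path,
			trace.WithSpanKind(trace.SpanKindServer),
			trace.WithAttributes(semconv.HTTPServerAttributesFromHTTPRequest("", "", r)...),
			trace.WithAttributes(semconv.NetAttributesFromHTTPRequest("tcp", r)...),
		)
		defer span.End()

		next.ServeHTTP(w, r.WithContext(ctx))

		// In a real middleware the code would come from the response writer;
		// http.StatusOK is only a stand-in. 4xx is not an error for SpanKindServer.
		span.SetStatus(semconv.SpanStatusFromHTTPStatusCodeAndSpanKind(http.StatusOK, trace.SpanKindServer))
	})
}

func main() {
	http.Handle("/", traced(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})))
	_ = http.ListenAndServe(":8080", nil)
}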
+const ( + // Array of brand name and version separated by a space + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (navigator.userAgentData.brands). + BrowserBrandsKey = attribute.Key("browser.brands") + // The platform on which the browser is running + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (navigator.userAgentData.platform). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD + // be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in the + // [os.type and os.name attributes](./os.md). However, for consistency, the values + // in the `browser.platform` attribute should capture the exact value that the + // user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") + // Full user-agent string provided by the browser + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 + // (KHTML, ' + // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' + // Note: The user-agent value SHOULD be provided only from browsers that do not + // have a mechanism to retrieve brands and platform individually from the User- + // Agent Client Hints API. To retrieve the value, the legacy `navigator.userAgent` + // API can be used. + BrowserUserAgentKey = attribute.Key("browser.user_agent") +) + +// A cloud environment (e.g. GCP, Azure, AWS) +const ( + // Name of the cloud provider. + // + // Type: Enum + // Required: No + // Stability: stable + CloudProviderKey = attribute.Key("cloud.provider") + // The cloud account ID the resource is assigned to. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + // The geographical region the resource is running. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for example + // [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc- + // detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global- + // infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en- + // us/global-infrastructure/geographies/), [Google Cloud + // regions](https://cloud.google.com/about/locations), or [Tencent Cloud + // regions](https://intl.cloud.tencent.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + // Cloud regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the resource + // is running. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud. 
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + // The cloud platform in use. + // + // Type: Enum + // Required: No + // Stability: stable + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. + CloudPlatformKey = attribute.Key("cloud.platform") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // Tencent Cloud + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws. + // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo + // perguide/clusters.html). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l + // aunch_types.html) for an ECS task. + // + // Type: Enum + // Required: No + // Stability: stable + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates + // t/developerguide/task_definitions.html). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + // The task definition family this task definition is a member of. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + // The revision for this task definition. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // The ARN of an EKS cluster. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// Resources specific to Amazon Web Services. +const ( + // The name(s) of the AWS log group(s) an application is writing to. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each write + // to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + // The Amazon Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- + // access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + // The name(s) of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + // The ARN(s) of the AWS log stream(s). 
+ // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- + // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- + // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain + // several log streams, so these ARNs necessarily identify both a log group and a + // log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") +) + +// A container instance. +const ( + // Container name used by container runtime. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + // Container ID. Usually a UUID, as for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container- + // identification). The UUID might be abbreviated. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + // The container runtime managing this container. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") + // Name of the image the container was built on. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + // Container image tag. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0.1' + ContainerImageTagKey = attribute.Key("container.image.tag") +) + +// The software deployment. +const ( + // Name of the [deployment + // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'staging', 'production' + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// The device on which the process represented by this resource is running. +const ( + // A unique identifier representing the device + // + // Type: string + // Required: No + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values outlined + // below. This value is not an advertising identifier and MUST NOT be used as + // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id + // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden + // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the + // Firebase Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on best + // practices and exact implementation details. Caution should be taken when + // storing personal data or anything which can identify a user. GDPR and data + // protection laws may apply, ensure you do your own due diligence. 
+ DeviceIDKey = attribute.Key("device.id") + // The model identifier for the device + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version of the + // model identifier rather than the market or consumer-friendly name of the + // device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + // The marketing name for the device model + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of the + // device model rather than a machine readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") + // The name of the device manufacturer + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") +) + +// A serverless instance. +const ( + // The name of the single function that this runtime instance executes. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span- + // general.md#source-code-attributes) + // span attributes). + + // For some cloud providers, the above definition is ambiguous. The following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud providers/products: + + // * **Azure:** The full name `/`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `faas.id` attribute). + FaaSNameKey = attribute.Key("faas.name") + // The unique ID of the single function that this runtime instance executes. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' + // Note: On some cloud providers, it may not be possible to determine the full ID + // at startup, + // so consider setting `faas.id` as a span attribute instead. + + // The exact value to use for `faas.id` depends on the cloud provider: + + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and- + // namespaces.html). + // Take care not to use the "invoked ARN" directly but replace any + // [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration- + // aliases.html) + // with the resolved function version, as the same runtime instance may be + // invokable with + // multiple different aliases. 
+ // * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full- + // resource-names) + // * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en- + // us/rest/api/resources/resources/get-by-id) of the invoked function, + // *not* the function app, having the form + // `/subscriptions//resourceGroups//providers/Microsoft.We + // b/sites//functions/`. + // This means that a span attribute MUST be used, as an Azure function app can + // host multiple functions that would usually share + // a TracerProvider. + FaaSIDKey = attribute.Key("faas.id") + // The immutable version of the function being executed. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration- + // versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run:** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env- + // var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") + // The execution environment ID as a string, that will be potentially reused for + // other invocations to the same function/function version. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + // The amount of memory available to the serverless function in MiB. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 128 + // Note: It's recommended to set this attribute since e.g. too little memory can + // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, + // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this + // information. + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") +) + +// A host is defined as a general computing instance. +const ( + // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud + // provider. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-test' + HostIDKey = attribute.Key("host.id") + // Name of the host. On Unix systems, it may contain what the hostname command + // returns, or the fully qualified hostname, or another name specified by the + // user. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + // Type of host. For Cloud, this must be the machine type. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") + // The CPU architecture the host system is running on. + // + // Type: Enum + // Required: No + // Stability: stable + HostArchKey = attribute.Key("host.arch") + // Name of the VM image or OS install the host was instantiated from. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + // VM image ID. For Cloud, this value is from the provider. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + // The version string of the VM image as defined in [Version + // Attributes](README.md#version-attributes). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// A Kubernetes Cluster. +const ( + // The name of the cluster. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") +) + +// A Kubernetes Node object. +const ( + // The name of the Node. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + // The UID of the Node. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") +) + +// A Kubernetes Namespace. +const ( + // The name of the namespace that the pod is running in. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") +) + +// A Kubernetes Pod object. +const ( + // The UID of the Pod. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + // The name of the Pod. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") +) + +// A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // The name of the Container from Pod specification, must be unique within a Pod. + // Container runtime usually uses different globally unique name + // (`container.name`). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + // Number of times the container was restarted. This attribute can be used to + // identify a particular container (running or stopped) within a container spec. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 0, 2 + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") +) + +// A Kubernetes ReplicaSet object. +const ( + // The UID of the ReplicaSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + // The name of the ReplicaSet. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") +) + +// A Kubernetes Deployment object. +const ( + // The UID of the Deployment. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + // The name of the Deployment. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") +) + +// A Kubernetes StatefulSet object. +const ( + // The UID of the StatefulSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + // The name of the StatefulSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") +) + +// A Kubernetes DaemonSet object. +const ( + // The UID of the DaemonSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + // The name of the DaemonSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") +) + +// A Kubernetes Job object. +const ( + // The UID of the Job. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + // The name of the Job. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") +) + +// A Kubernetes CronJob object. +const ( + // The UID of the CronJob. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + // The name of the CronJob. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") +) + +// The operating system (OS) on which the process represented by this resource is running. +const ( + // The operating system type. + // + // Type: Enum + // Required: Always + // Stability: stable + OSTypeKey = attribute.Key("os.type") + // Human readable (not intended to be parsed) OS version information, like e.g. + // reported by `ver` or `lsb_release -a` commands. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' + OSDescriptionKey = attribute.Key("os.description") + // Human readable operating system name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + // The version string of the operating system as defined in [Version + // Attributes](../../resource/semantic_conventions/README.md#version-attributes). 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// An operating system process. +const ( + // Process identifier (PID). + // + // Type: int + // Required: No + // Stability: stable + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + // The name of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of + // `GetProcessImageFileNameW`. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + // The full path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + // The command used to launch the process (i.e. the command name). On Linux based + // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, + // can be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + // The full command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not + // set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + // All the command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited strings + // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be + // the full argv vector passed to `main`. + // + // Type: string[] + // Required: See below + // Stability: stable + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + // The username of the user that owns the process. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") +) + +// The single (language) runtime instance which is monitored. 
+const ( + // The name of the runtime of this process. For compiled native binaries, this + // SHOULD be the name of the compiler. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + // The version of the runtime of this process, as returned by the runtime without + // modification. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + // An additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") +) + +// A service instance. +const ( + // Logical name of the service. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled services. If + // the value was not specified, SDKs MUST fallback to `unknown_service:` + // concatenated with [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, the + // value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + // A namespace for `service.name`. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group of + // services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` is + // expected to be unique for all services that have no explicit namespace defined + // (so the empty/unspecified namespace is simply one more valid namespace). Zero- + // length namespace string is assumed equal to unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + // The string ID of the service instance. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be globally + // unique). The ID helps to distinguish instances of the same service that exist + // at the same time (e.g. instances of a horizontally scaled service). It is + // preferable for the ID to be persistent and stay the same for the lifetime of + // the service instance, however it is acceptable that the ID is ephemeral and + // changes during important lifetime events for the service (e.g. service + // restarts). If the service has no inherent unique ID that can be used as the + // value of this attribute it is recommended to generate a random Version 1 or + // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + // The version string of the service API or implementation. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: '2.0.0' + ServiceVersionKey = attribute.Key("service.version") +) + +// The telemetry SDK used to capture data recorded by the instrumentation libraries. +const ( + // The name of the telemetry SDK as defined above. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + // The language of the telemetry SDK. + // + // Type: Enum + // Required: No + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + // The version string of the telemetry SDK. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + // The version string of the auto instrumentation agent, if used. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '1.2.3' + TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") +) + +// Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. +const ( + // The name of the web engine. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + // The version of the web engine. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") + // Additional description of the web engine (e.g. detailed version and edition + // information). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/schema.go new file mode 100644 index 0000000000000..2f2a019e43db5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/schema.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/<version> +const SchemaURL = "https://opentelemetry.io/schemas/1.12.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go new file mode 100644 index 0000000000000..047d8e95cce9b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.12.0/trace.go @@ -0,0 +1,1704 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.12.0" + +import "go.opentelemetry.io/otel/attribute" + +// Span attributes used by AWS Lambda (in addition to general `faas` attributes). +const ( + // The full invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the `/runtime/invocation/next` + // applicable). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `faas.id` if an alias is involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// This document defines attributes for CloudEvents. CloudEvents is a specification on how to define event data in a standard way. These attributes can be attached to spans when performing operations with CloudEvents, regardless of the protocol being used. +const ( + // The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec + // .md#id) uniquely identifies the event. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + // The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.m + // d#source-1) identifies the context in which an event happened. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my- + // service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + // The [version of the CloudEvents specification](https://github.com/cloudevents/s + // pec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses.
+ // + // Type: string + // Required: Always + // Stability: stable + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + // The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/sp + // ec.md#type) contains a value describing the type of event related to the + // originating occurrence. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") + // The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec. + // md#subject) of the event in the context of the event producer (identified by + // source). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") +) + +// This document defines semantic conventions for the OpenTracing Shim +const ( + // Parent-child Reference type + // + // Type: Enum + // Required: No + // Stability: stable + // Note: The causal relationship between a child Span and a parent Span. + OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span does not depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// This document defines the attributes used to perform database client calls. +const ( + // An identifier for the database management system (DBMS) product being used. See + // below for a list of well-known identifiers. + // + // Type: Enum + // Required: Always + // Stability: stable + DBSystemKey = attribute.Key("db.system") + // The connection string used to connect to the database. It is recommended to + // remove embedded credentials. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + DBConnectionStringKey = attribute.Key("db.connection_string") + // Username for accessing the database. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'readonly_user', 'reporting_user' + DBUserKey = attribute.Key("db.user") + // The fully-qualified class name of the [Java Database Connectivity + // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver + // used to connect. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") + // This attribute is used to report the name of the database being accessed. For + // commands that switch the database, this should be set to the target database + // (even if the command fails). + // + // Type: string + // Required: Required, if applicable. + // Stability: stable + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called "schema + // name". In case there are multiple layers that could be considered for database + // name (e.g. Oracle instance name and schema name), the database name to be used + // is the more specific layer (e.g. Oracle schema name). + DBNameKey = attribute.Key("db.name") + // The database statement being executed. 
+ // + // Type: string + // Required: Required if applicable and not explicitly disabled via + // instrumentation configuration. + // Stability: stable + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + // Note: The value may be sanitized to exclude sensitive information. + DBStatementKey = attribute.Key("db.statement") + // The name of the operation being executed, e.g. the [MongoDB command + // name](https://docs.mongodb.com/manual/reference/command/#database-operations) + // such as `findAndModify`, or the SQL keyword. + // + // Type: string + // Required: Required, if `db.statement` is not applicable. + // Stability: stable + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to attempt any + // client-side parsing of `db.statement` just to get this property, but it should + // be set if the operation name is provided by the library being instrumented. If + // the SQL statement has an ambiguous operation, or performs more than one + // operation, this value may be omitted. + DBOperationKey = attribute.Key("db.operation") +) + +var ( + // Some other SQL database. Fallback only. See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = 
DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") +) + +// Connection-level attributes for Microsoft SQL Server +const ( + // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en- + // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) + // connecting to. This name is used to determine the port of a named instance. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'MSSQLSERVER' + // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer + // required (but still recommended if non-standard). + DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") +) + +// Call-level attributes for Cassandra +const ( + // The fetch size used for paging, i.e. how many rows will be returned at once. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + // The consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra- + // oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // Required: No + // Stability: stable + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + // The name of the primary table that the operation is acting upon, including the + // keyspace name (if applicable). + // + // Type: string + // Required: Recommended if available. + // Stability: stable + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra rather + // than sql. It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting upon an + // anonymous table, or more than one table, this value MUST NOT be set. + DBCassandraTableKey = attribute.Key("db.cassandra.table") + // Whether or not the query is idempotent. + // + // Type: boolean + // Required: No + // Stability: stable + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + // The number of times a query was speculatively executed. Not set or `0` if the + // query was not executed speculatively. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") + // The ID of the coordinating node for a query. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + // The data center of the coordinating node for a query. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +// Call-level attributes for Redis +const ( + // The index of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To be used + // instead of the generic `db.name` attribute. + // + // Type: int + // Required: Required, if other than the default database (`0`). + // Stability: stable + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") +) + +// Call-level attributes for MongoDB +const ( + // The collection being accessed within the database stated in `db.name`. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") +) + +// Call-level attributes for SQL databases +const ( + // The name of the primary table that the operation is acting upon, including the + // database name (if applicable). + // + // Type: string + // Required: Recommended if available. + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting upon an + // anonymous table, or more than one table, this value MUST NOT be set. + DBSQLTableKey = attribute.Key("db.sql.table") +) + +// This document defines the attributes used to report a single exception associated with a span. +const ( + // The type of the exception (its fully-qualified class name, if applicable). The + // dynamic type of the exception should be preferred over the static type in + // languages that support it. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") + // The exception message. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + // A stacktrace as a string in the natural representation for the language + // runtime. 
The representation is to be determined and documented by each language + // SIG. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") + // SHOULD be set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // Required: No + // Stability: stable + // Note: An exception is considered to have escaped (or left) the scope of a span, + // if that span is ended while the exception is still logically "in flight". + // This may be actually "in flight" in some languages (e.g. if the exception + // is passed to a Context manager's `__exit__` method in Python) but will + // usually be caught at the point of recording the exception in most languages. + + // It is usually not possible to determine at the point where an exception is + // thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending the span, + // as done in the [example above](#recording-an-exception). + + // It follows that an exception may still escape the scope of the span + // even if the `exception.escaped` attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + ExceptionEscapedKey = attribute.Key("exception.escaped") +) + +// This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. +const ( + // Type of the trigger which caused this function execution. + // + // Type: Enum + // Required: No + // Stability: stable + // Note: For the server/consumer span on the incoming side, + // `faas.trigger` MUST be set. + + // Clients invoking FaaS instances usually cannot set `faas.trigger`, + // since they would typically need to look in the payload to determine + // the event type. If clients set it, it should be the same as the + // trigger that corresponding incoming would have (i.e., this has + // nothing to do with the underlying transport used to make the API + // call to invoke the lambda, which is often HTTP). + FaaSTriggerKey = attribute.Key("faas.trigger") + // The execution ID of the current function execution. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSExecutionKey = attribute.Key("faas.execution") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. +const ( + // The name of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos + // DB to the database name. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + // Describes the type of the operation that was performed on the data. + // + // Type: Enum + // Required: Always + // Stability: stable + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + // A string containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed + // in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // Required: Always + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + // The document name/table subjected to the operation. For example, in Cloud + // Storage or S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // A string containing the function invocation time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed + // in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // Required: Always + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + // A string containing the schedule period as [Cron Expression](https://docs.oracl + // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") +) + +// Contains additional attributes for incoming FaaS spans. +const ( + // A boolean that is true if the serverless function is executed for the first + // time (aka cold-start). 
+ // + // Type: boolean + // Required: No + // Stability: stable + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// Contains additional attributes for outgoing FaaS spans. +const ( + // The name of the invoked function. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked + // function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + // The cloud provider of the invoked function. + // + // Type: Enum + // Required: Always + // Stability: stable + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked + // function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + // The cloud region of the invoked function. + // + // Type: string + // Required: For some cloud providers, like AWS or GCP, the region in which a + // function is hosted is essential to uniquely identify the function and also part + // of its endpoint. Since it's part of the endpoint being called, the region is + // always known to clients. In these cases, `faas.invoked_region` MUST be set + // accordingly. If the region is unknown to the client or not required for + // identifying the invoked function, setting `faas.invoked_region` is optional. + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked + // function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// These attributes may be used for any network related operation. +const ( + // Transport protocol used. See note below. + // + // Type: Enum + // Required: No + // Stability: stable + NetTransportKey = attribute.Key("net.transport") + // Remote address of the peer (dotted decimal for IPv4 or + // [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6) + // + // Type: string + // Required: No + // Stability: stable + // Examples: '127.0.0.1' + NetPeerIPKey = attribute.Key("net.peer.ip") + // Remote port number. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 80, 8080, 443 + NetPeerPortKey = attribute.Key("net.peer.port") + // Remote hostname or similar, see note below. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'example.com' + // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an extra + // DNS lookup. + NetPeerNameKey = attribute.Key("net.peer.name") + // Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '192.168.0.1' + NetHostIPKey = attribute.Key("net.host.ip") + // Like `net.peer.port` but for the host port. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 35555 + NetHostPortKey = attribute.Key("net.host.port") + // Local hostname or similar, see note below. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'localhost' + NetHostNameKey = attribute.Key("net.host.name") + // The internet connection type currently being used by the host. + // + // Type: Enum + // Required: No + // Stability: stable + // Examples: 'wifi' + NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") + // This describes more details regarding the connection.type. It may be the type + // of cell technology connection, but it could be used for describing details + // about a wifi connection. + // + // Type: Enum + // Required: No + // Stability: stable + // Examples: 'LTE' + NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") + // The name of the mobile carrier. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'sprint' + NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") + // The mobile carrier country code. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '310' + NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") + // The mobile carrier network code. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '001' + NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") + // The ISO 3166-1 alpha-2 2-character country code associated with the mobile + // carrier network. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'DE' + NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") +) + +var ( + // ip_tcp + NetTransportTCP = NetTransportKey.String("ip_tcp") + // ip_udp + NetTransportUDP = NetTransportKey.String("ip_udp") + // Another IP-based protocol + NetTransportIP = NetTransportKey.String("ip") + // Unix Domain socket. See below + NetTransportUnix = NetTransportKey.String("unix") + // Named or anonymous pipe. See note below + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + NetTransportOther = NetTransportKey.String("other") +) + +var ( + // wifi + NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") + // wired + NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") + // cell + NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") + // unavailable + NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") + // unknown + NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") +) + +var ( + // GPRS + NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") + // EDGE + NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") + // UMTS + NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") + // CDMA + NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. 
A + NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") + // HSPA + NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") + // IDEN + NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") + // EVDO Rev. B + NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") + // LTE + NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") + // EHRPD + NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") + // GSM + NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") +) + +// Operations that access some remote service. +const ( + // The [`service.name`](../../resource/semantic_conventions/README.md#service) of + // the remote service. SHOULD be equal to the actual `service.name` resource + // attribute of the remote service if any. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// These attributes may be used for any operation with an authenticated and/or authorized enduser. +const ( + // Username or client_id extracted from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the + // inbound request from outside the system. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + // Actual/assumed role the client is making the request under extracted from token + // or application security context. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + // Scopes or granted authorities the client currently possesses extracted from + // token or application security context. The value would come from the scope + // associated with an [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value + // in a [SAML 2.0 Assertion](http://docs.oasis- + // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// These attributes may be used for any operation to store information about a thread that started a span. +const ( + // Current "managed" thread ID (as opposed to OS thread ID). + // + // Type: int + // Required: No + // Stability: stable + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + // Current thread name. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// These attributes allow to report this unit of code and therefore to provide more context about the span. +const ( + // The method or function name, or equivalent (usually rightmost part of the code + // unit's name). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + // The "namespace" within which `code.function` is defined. Usually the qualified + // class or module name, such that `code.namespace` + some separator + + // `code.function` form a unique identifier for the code unit. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + // The source code file name that identifies the code unit as uniquely as possible + // (preferably an absolute file path). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + // The line number in `code.filepath` best representing the operation. It SHOULD + // point within the code unit named in `code.function`. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") +) + +// This document defines semantic conventions for HTTP client and server Spans. +const ( + // HTTP request method. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + HTTPMethodKey = attribute.Key("http.method") + // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. + // Usually the fragment is not transmitted over HTTP, but if it is known, it + // should be included nevertheless. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Note: `http.url` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case the attribute's + // value should be `https://www.example.com/`. + HTTPURLKey = attribute.Key("http.url") + // The full request target as passed in a HTTP request line or equivalent. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '/path/12314/?q=ddds#123' + HTTPTargetKey = attribute.Key("http.target") + // The value of the [HTTP host + // header](https://tools.ietf.org/html/rfc7230#section-5.4). An empty Host header + // should also be reported, see note. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'www.example.org' + // Note: When the header is present but empty the attribute SHOULD be set to the + // empty string. Note that this is a valid situation that is expected in certain + // cases, according the aforementioned [section of RFC + // 7230](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is not + // set the attribute MUST NOT be set. + HTTPHostKey = attribute.Key("http.host") + // The URI scheme identifying the used protocol. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'http', 'https' + HTTPSchemeKey = attribute.Key("http.scheme") + // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // Required: If and only if one was received/sent. 
+ // Stability: stable + // Examples: 200 + HTTPStatusCodeKey = attribute.Key("http.status_code") + // Kind of HTTP protocol used. + // + // Type: Enum + // Required: No + // Stability: stable + // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP` + // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed. + HTTPFlavorKey = attribute.Key("http.flavor") + // Value of the [HTTP User- + // Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) header sent by the + // client. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' + HTTPUserAgentKey = attribute.Key("http.user_agent") + // The size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For + // requests using transport encoding, this should be the compressed size. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 3495 + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + // The size of the uncompressed request payload body after transport decoding. Not + // set if transport encoding not used. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 5493 + HTTPRequestContentLengthUncompressedKey = attribute.Key("http.request_content_length_uncompressed") + // The size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For + // requests using transport encoding, this should be the compressed size. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 3495 + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") + // The size of the uncompressed response payload body after transport decoding. + // Not set if transport encoding not used. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 5493 + HTTPResponseContentLengthUncompressedKey = attribute.Key("http.response_content_length_uncompressed") + // The ordinal number of request re-sending attempt. + // + // Type: int + // Required: If and only if a request was retried. + // Stability: stable + // Examples: 3 + HTTPRetryCountKey = attribute.Key("http.retry_count") +) + +var ( + // HTTP/1.0 + HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") + // HTTP/1.1 + HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") + // HTTP/2 + HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") + // HTTP/3 + HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") + // SPDY protocol + HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") + // QUIC protocol + HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") +) + +// Semantic Convention for HTTP Server +const ( + // The primary server name of the matched virtual host. This should be obtained + // via configuration. If no such configuration can be obtained, this attribute + // MUST NOT be set ( `net.host.name` should be used instead). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'example.com' + // Note: `http.url` is usually not readily available on the server side but would + // have to be assembled in a cumbersome and sometimes lossy process from other + // information (see e.g. open-telemetry/opentelemetry-python/pull/148). 
It is thus + // preferred to supply the raw data that is available. + HTTPServerNameKey = attribute.Key("http.server_name") + // The matched route (path template). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '/users/:userID?' + HTTPRouteKey = attribute.Key("http.route") + // The IP address of the original client behind all proxies, if known (e.g. from + // [X-Forwarded-For](https://developer.mozilla.org/en- + // US/docs/Web/HTTP/Headers/X-Forwarded-For)). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '83.164.160.102' + // Note: This is not necessarily the same as `net.peer.ip`, which would + // identify the network-level peer, which may be a proxy. + + // This attribute should be set when a source of information different + // from the one used for `net.peer.ip`, is available even if that other + // source just confirms the same value as `net.peer.ip`. + // Rationale: For `net.peer.ip`, one typically does not know if it + // comes from a proxy, reverse proxy, or the actual client. Setting + // `http.client_ip` when it's the same as `net.peer.ip` means that + // one is at least somewhat confident that the address is not that of + // the closest proxy. + HTTPClientIPKey = attribute.Key("http.client_ip") +) + +// Attributes that exist for multiple DynamoDB request types. +const ( + // The keys in the `RequestItems` object field. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + // The JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { + // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + // The JSON-serialized value of the `ItemCollectionMetrics` response field. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. + // + // Type: double + // Required: No + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. + // + // Type: double + // Required: No + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + // The value of the `ConsistentRead` request parameter. 
+ // + // Type: boolean + // Required: No + // Stability: stable + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + // The value of the `ProjectionExpression` request parameter. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, + // ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + // The value of the `Limit` request parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + // The value of the `AttributesToGet` request parameter. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + // The value of the `IndexName` request parameter. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + // The value of the `Select` request parameter. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") +) + +// DynamoDB.CreateTable +const ( + // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request + // field + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": + // number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request + // field. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": + // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") +) + +// DynamoDB.ListTables +const ( + // The value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + // The the number of items in the `TableNames` response parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") +) + +// DynamoDB.Query +const ( + // The value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // Required: No + // Stability: stable + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") +) + +// DynamoDB.Scan +const ( + // The value of the `Segment` request parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + // The value of the `TotalSegments` request parameter. 
+ // + // Type: int + // Required: No + // Stability: stable + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + // The value of the `Count` response parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + // The value of the `ScannedCount` response parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") +) + +// DynamoDB.UpdateTable +const ( + // The JSON-serialized value of each item in the `AttributeDefinitions` request + // field. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + // The JSON-serialized value of each item in the the `GlobalSecondaryIndexUpdates` + // request field. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") +) + +// This document defines the attributes used in messaging systems. +const ( + // A string identifying the messaging system. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' + MessagingSystemKey = attribute.Key("messaging.system") + // The message destination name. This might be equal to the span name but is + // required nevertheless. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + MessagingDestinationKey = attribute.Key("messaging.destination") + // The kind of message destination + // + // Type: Enum + // Required: Required only if the message destination is either a `queue` or + // `topic`. + // Stability: stable + MessagingDestinationKindKey = attribute.Key("messaging.destination_kind") + // A boolean that is true if the message destination is temporary. + // + // Type: boolean + // Required: If missing, it is assumed to be false. + // Stability: stable + MessagingTempDestinationKey = attribute.Key("messaging.temp_destination") + // The name of the transport protocol. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'AMQP', 'MQTT' + MessagingProtocolKey = attribute.Key("messaging.protocol") + // The version of the transport protocol. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0.9.1' + MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version") + // Connection string. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'tibjmsnaming://localhost:7222', + // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue' + MessagingURLKey = attribute.Key("messaging.url") + // A value used by the messaging system as an identifier for the message, + // represented as a string. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message_id") + // The [conversation ID](#conversations) identifying the conversation to which the + // message belongs, represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'MyConversationID' + MessagingConversationIDKey = attribute.Key("messaging.conversation_id") + // The (uncompressed) size of the message payload in bytes. Also use this + // attribute if it is unknown whether the compressed or uncompressed payload size + // is reported. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 2738 + MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes") + // The compressed size of the message payload in bytes. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 2048 + MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes") +) + +var ( + // A message sent to a queue + MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") + // A message sent to a topic + MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") +) + +// Semantic convention for a consumer of messages received from a messaging system +const ( + // A string identifying the kind of message consumption as defined in the + // [Operation names](#operation-names) section above. If the operation is "send", + // this attribute MUST NOT be set, since the operation can be inferred from the + // span kind in that case. + // + // Type: Enum + // Required: No + // Stability: stable + MessagingOperationKey = attribute.Key("messaging.operation") + // The identifier for the consumer receiving a message. For Kafka, set it to + // `{messaging.kafka.consumer_group} - {messaging.kafka.client_id}`, if both are + // present, or only `messaging.kafka.consumer_group`. For brokers, such as + // RabbitMQ and Artemis, set it to the `client_id` of the client consuming the + // message. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'mygroup - client-6' + MessagingConsumerIDKey = attribute.Key("messaging.consumer_id") +) + +var ( + // receive + MessagingOperationReceive = MessagingOperationKey.String("receive") + // process + MessagingOperationProcess = MessagingOperationKey.String("process") +) + +// Attributes for RabbitMQ +const ( + // RabbitMQ message routing key. + // + // Type: string + // Required: Unless it is empty. + // Stability: stable + // Examples: 'myKey' + MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key") +) + +// Attributes for Apache Kafka +const ( + // Message keys in Kafka are used for grouping alike messages to ensure they're + // processed on the same partition. They differ from `messaging.message_id` in + // that they're not unique. If the key is `null`, the attribute MUST NOT be set. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'myKey' + // Note: If the key type is not string, it's string representation has to be + // supplied for the attribute. If the key has no unambiguous, canonical string + // form, don't include its value. + MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key") + // Name of the Kafka Consumer Group that is handling the message. 
Only applies to + // consumers, not producers. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group") + // Client ID for the Consumer or Producer that is handling the message. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'client-5' + MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") + // Partition the message is sent to. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 2 + MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition") + // A boolean that is true if the message is a tombstone. + // + // Type: boolean + // Required: If missing, it is assumed to be false. + // Stability: stable + MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone") +) + +// Attributes for Apache RocketMQ +const ( + // Namespace of RocketMQ resources, resources in different namespaces are + // individual. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'myNamespace' + MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") + // Name of the RocketMQ producer/consumer group that is handling the message. The + // client type is identified by the SpanKind. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + // The unique identifier for each client. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'myhost@8742@s8083jm' + MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") + // Type of message. + // + // Type: Enum + // Required: No + // Stability: stable + MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message_type") + // The secondary classifier of message besides topic. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'tagA' + MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message_tag") + // Key(s) of message, another way to mark message besides message id. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'keyA', 'keyB' + MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message_keys") + // Model of message consumption. This only applies to consumer spans. + // + // Type: Enum + // Required: No + // Stability: stable + MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") +) + +var ( + // Normal message + MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") + // FIFO message + MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") + // Delay message + MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") + // Transaction message + MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") +) + +var ( + // Clustering consumption model + MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") + // Broadcasting consumption model + MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") +) + +// This document defines semantic conventions for remote procedure calls. +const ( + // A string identifying the remoting system. 
See below for a list of well-known + // identifiers. + // + // Type: Enum + // Required: Always + // Stability: stable + RPCSystemKey = attribute.Key("rpc.system") + // The full (logical) name of the service being called, including its package + // name, if applicable. + // + // Type: string + // Required: No, but recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing class. + // The `code.namespace` attribute may be used to store the latter (despite the + // attribute name, it may include a class name; e.g., class with method actually + // executing the call on the server side, RPC client stub class on the client + // side). + RPCServiceKey = attribute.Key("rpc.service") + // The name of the (logical) method being called, must be equal to the $method + // part in the span name. + // + // Type: string + // Required: No, but recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the latter + // (e.g., method actually executing the call on the server side, RPC client stub + // method on the client side). + RPCMethodKey = attribute.Key("rpc.method") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") +) + +// Tech-specific attributes for gRPC. +const ( + // The [numeric status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC + // request. + // + // Type: Enum + // Required: Always + // Stability: stable + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). 
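Before the JSON-RPC constants that follow, a minimal sketch of how the RPC and gRPC attributes defined above might be attached to a client span. This is not part of the vendored file: the tracer name, service, and method values are illustrative, and the import path assumes the v1.17.0 semconv package added later in this diff, which exports the same keys.

```go
// Illustrative sketch only: records rpc.system, rpc.service, rpc.method and
// the gRPC status code on a client span using the attribute keys above.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/trace"
)

func traceEchoCall(ctx context.Context) {
	tracer := otel.Tracer("example/rpc-client") // hypothetical instrumentation name
	ctx, span := tracer.Start(ctx, "myservice.EchoService/exampleMethod",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(
			semconv.RPCSystemGRPC,
			semconv.RPCServiceKey.String("myservice.EchoService"),
			semconv.RPCMethodKey.String("exampleMethod"),
		),
	)
	defer span.End()

	_ = ctx // ctx would be passed to the actual RPC invocation (elided here)

	// After the call completes, record the resulting gRPC status code.
	span.SetAttributes(semconv.RPCGRPCStatusCodeOk)
}
```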
+const ( + // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC + // 1.0 does not specify this, the value can be omitted. + // + // Type: string + // Required: If missing, it is assumed to be "1.0". + // Stability: stable + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + // `id` property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be cast to + // string for simplicity. Use empty string in case of `null` value. Omit entirely + // if this is a notification. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + // `error.code` property of response if it is an error response. + // + // Type: int + // Required: If missing, response is assumed to be successful. + // Stability: stable + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + // `error.message` property of response if it is an error response. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") +) + +// RPC received/sent message. +const ( + // Whether this is a received or sent message. + // + // Type: Enum + // Required: No + // Stability: stable + MessageTypeKey = attribute.Key("message.type") + // MUST be calculated as two different counters starting from `1` one for sent + // messages and one for received message. + // + // Type: int + // Required: No + // Stability: stable + // Note: This way we guarantee that the values will be consistent between + // different implementations. + MessageIDKey = attribute.Key("message.id") + // Compressed size of the message in bytes. + // + // Type: int + // Required: No + // Stability: stable + MessageCompressedSizeKey = attribute.Key("message.compressed_size") + // Uncompressed size of the message in bytes. + // + // Type: int + // Required: No + // Stability: stable + MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") +) + +var ( + // sent + MessageTypeSent = MessageTypeKey.String("SENT") + // received + MessageTypeReceived = MessageTypeKey.String("RECEIVED") +) diff --git a/vendor/go.opentelemetry.io/otel/internal/metric/registry/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go similarity index 56% rename from vendor/go.opentelemetry.io/otel/internal/metric/registry/doc.go rename to vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go index 2f17562f0eb54..71a1f7748d558 100644 --- a/vendor/go.opentelemetry.io/otel/internal/metric/registry/doc.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go @@ -12,13 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -/* -Package registry provides a non-standalone implementation of -MeterProvider that adds uniqueness checking for instrument descriptors -on top of other MeterProvider it wraps. - -This package is currently in a pre-GA phase. Backwards incompatible changes -may be introduced in subsequent minor version releases as we work to track the -evolving OpenTelemetry specification and user feedback. -*/ -package registry // import "go.opentelemetry.io/otel/internal/metric/registry" +// Package semconv implements OpenTelemetry semantic conventions. 
+// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the conventions +// as of the v1.17.0 version of the OpenTelemetry specification. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go new file mode 100644 index 0000000000000..9b8c559de4279 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go new file mode 100644 index 0000000000000..d5c4b5c136aa7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +// HTTP scheme attributes. +var ( + HTTPSchemeHTTP = HTTPSchemeKey.String("http") + HTTPSchemeHTTPS = HTTPSchemeKey.String("https") +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go new file mode 100644 index 0000000000000..c60b2a6bb68ae --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/httpconv/http.go @@ -0,0 +1,150 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package httpconv provides OpenTelemetry semantic convetions for the net/http +// package from the standard library. 
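A minimal usage sketch for the httpconv package introduced below: an HTTP client round trip that combines ClientRequest, ClientResponse, and ClientStatus. The wrapper function and tracer name are assumptions for illustration; only the httpconv functions themselves come from this diff.

```go
// Illustrative sketch only: instruments an outgoing HTTP request with the
// httpconv helpers (ClientRequest, ClientResponse, ClientStatus).
package example

import (
	"context"
	"net/http"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
	"go.opentelemetry.io/otel/trace"
)

func doRequest(ctx context.Context, req *http.Request) (*http.Response, error) {
	tracer := otel.Tracer("example/http-client") // hypothetical instrumentation name
	ctx, span := tracer.Start(ctx, "HTTP "+req.Method,
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(httpconv.ClientRequest(req)...),
	)
	defer span.End()

	resp, err := http.DefaultClient.Do(req.WithContext(ctx))
	if err != nil {
		return nil, err
	}
	// Record response attributes and map the HTTP status code to a span status.
	span.SetAttributes(httpconv.ClientResponse(resp)...)
	span.SetStatus(httpconv.ClientStatus(resp.StatusCode))
	return resp, nil
}
```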
+package httpconv // import "go.opentelemetry.io/otel/semconv/v1.17.0/httpconv" + +import ( + "net/http" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/semconv/internal/v2" + semconv "go.opentelemetry.io/otel/semconv/v1.17.0" +) + +var ( + nc = &internal.NetConv{ + NetHostNameKey: semconv.NetHostNameKey, + NetHostPortKey: semconv.NetHostPortKey, + NetPeerNameKey: semconv.NetPeerNameKey, + NetPeerPortKey: semconv.NetPeerPortKey, + NetSockPeerAddrKey: semconv.NetSockPeerAddrKey, + NetSockPeerPortKey: semconv.NetSockPeerPortKey, + NetTransportOther: semconv.NetTransportOther, + NetTransportTCP: semconv.NetTransportTCP, + NetTransportUDP: semconv.NetTransportUDP, + NetTransportInProc: semconv.NetTransportInProc, + } + + hc = &internal.HTTPConv{ + NetConv: nc, + + EnduserIDKey: semconv.EnduserIDKey, + HTTPClientIPKey: semconv.HTTPClientIPKey, + HTTPFlavorKey: semconv.HTTPFlavorKey, + HTTPMethodKey: semconv.HTTPMethodKey, + HTTPRequestContentLengthKey: semconv.HTTPRequestContentLengthKey, + HTTPResponseContentLengthKey: semconv.HTTPResponseContentLengthKey, + HTTPRouteKey: semconv.HTTPRouteKey, + HTTPSchemeHTTP: semconv.HTTPSchemeHTTP, + HTTPSchemeHTTPS: semconv.HTTPSchemeHTTPS, + HTTPStatusCodeKey: semconv.HTTPStatusCodeKey, + HTTPTargetKey: semconv.HTTPTargetKey, + HTTPURLKey: semconv.HTTPURLKey, + HTTPUserAgentKey: semconv.HTTPUserAgentKey, + } +) + +// ClientResponse returns attributes for an HTTP response received by a client +// from a server. It will return the following attributes if the related values +// are defined in resp: "http.status.code", "http.response_content_length". +// +// This does not add all OpenTelemetry required attributes for an HTTP event, +// it assumes ClientRequest was used to create the span with a complete set of +// attributes. If a complete set of attributes can be generated using the +// request contained in resp. For example: +// +// append(ClientResponse(resp), ClientRequest(resp.Request)...) +func ClientResponse(resp *http.Response) []attribute.KeyValue { + return hc.ClientResponse(resp) +} + +// ClientRequest returns attributes for an HTTP request made by a client. The +// following attributes are always returned: "http.url", "http.flavor", +// "http.method", "net.peer.name". The following attributes are returned if the +// related values are defined in req: "net.peer.port", "http.user_agent", +// "http.request_content_length", "enduser.id". +func ClientRequest(req *http.Request) []attribute.KeyValue { + return hc.ClientRequest(req) +} + +// ClientStatus returns a span status code and message for an HTTP status code +// value received by a client. +func ClientStatus(code int) (codes.Code, string) { + return hc.ClientStatus(code) +} + +// ServerRequest returns attributes for an HTTP request received by a server. +// +// The server must be the primary server name if it is known. For example this +// would be the ServerName directive +// (https://httpd.apache.org/docs/2.4/mod/core.html#servername) for an Apache +// server, and the server_name directive +// (http://nginx.org/en/docs/http/ngx_http_core_module.html#server_name) for an +// nginx server. More generically, the primary server name would be the host +// header value that matches the default virtual host of an HTTP server. It +// should include the host identifier and if a port is used to route to the +// server that port identifier should be included as an appropriate port +// suffix. 
+// +// If the primary server name is not known, server should be an empty string. +// The req Host will be used to determine the server instead. +// +// The following attributes are always returned: "http.method", "http.scheme", +// "http.flavor", "http.target", "net.host.name". The following attributes are +// returned if they related values are defined in req: "net.host.port", +// "net.sock.peer.addr", "net.sock.peer.port", "http.user_agent", "enduser.id", +// "http.client_ip". +func ServerRequest(server string, req *http.Request) []attribute.KeyValue { + return hc.ServerRequest(server, req) +} + +// ServerStatus returns a span status code and message for an HTTP status code +// value returned by a server. Status codes in the 400-499 range are not +// returned as errors. +func ServerStatus(code int) (codes.Code, string) { + return hc.ServerStatus(code) +} + +// RequestHeader returns the contents of h as attributes. +// +// Instrumentation should require an explicit configuration of which headers to +// captured and then prune what they pass here. Including all headers can be a +// security risk - explicit configuration helps avoid leaking sensitive +// information. +// +// The User-Agent header is already captured in the http.user_agent attribute +// from ClientRequest and ServerRequest. Instrumentation may provide an option +// to capture that header here even though it is not recommended. Otherwise, +// instrumentation should filter that out of what is passed. +func RequestHeader(h http.Header) []attribute.KeyValue { + return hc.RequestHeader(h) +} + +// ResponseHeader returns the contents of h as attributes. +// +// Instrumentation should require an explicit configuration of which headers to +// captured and then prune what they pass here. Including all headers can be a +// security risk - explicit configuration helps avoid leaking sensitive +// information. +// +// The User-Agent header is already captured in the http.user_agent attribute +// from ClientRequest and ServerRequest. Instrumentation may provide an option +// to capture that header here even though it is not recommended. Otherwise, +// instrumentation should filter that out of what is passed. +func ResponseHeader(h http.Header) []attribute.KeyValue { + return hc.ResponseHeader(h) +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go new file mode 100644 index 0000000000000..add7987a22b04 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go @@ -0,0 +1,1118 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +import "go.opentelemetry.io/otel/attribute" + +// The web browser in which the application represented by the resource is running. 
The `browser.*` attributes MUST be used only for resources that represent applications running in a web browser (regardless of whether running on a mobile or desktop device). +const ( + // Array of brand name and version separated by a space + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.brands`). + BrowserBrandsKey = attribute.Key("browser.brands") + // The platform on which the browser is running + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Windows', 'macOS', 'Android' + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.platform`). If unavailable, the legacy + // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD + // be left unset in order for the values to be consistent. + // The list of possible values is defined in the [W3C User-Agent Client Hints + // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform). + // Note that some (but not all) of these values can overlap with values in the + // [`os.type` and `os.name` attributes](./os.md). However, for consistency, the + // values in the `browser.platform` attribute should capture the exact value that + // the user agent provides. + BrowserPlatformKey = attribute.Key("browser.platform") + // A boolean that is true if the browser is running on a mobile device + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + // Note: This value is intended to be taken from the [UA client hints + // API](https://wicg.github.io/ua-client-hints/#interface) + // (`navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be + // left unset. + BrowserMobileKey = attribute.Key("browser.mobile") + // Full user-agent string provided by the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 + // (KHTML, ' + // 'like Gecko) Chrome/95.0.4638.54 Safari/537.36' + // Note: The user-agent value SHOULD be provided only from browsers that do not + // have a mechanism to retrieve brands and platform individually from the User- + // Agent Client Hints API. To retrieve the value, the legacy `navigator.userAgent` + // API can be used. + BrowserUserAgentKey = attribute.Key("browser.user_agent") + // Preferred language of the user using the browser + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'en', 'en-US', 'fr', 'fr-FR' + // Note: This value is intended to be taken from the Navigator API + // `navigator.language`. + BrowserLanguageKey = attribute.Key("browser.language") +) + +// A cloud environment (e.g. GCP, Azure, AWS) +const ( + // Name of the cloud provider. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + CloudProviderKey = attribute.Key("cloud.provider") + // The cloud account ID the resource is assigned to. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '111111111111', 'opentelemetry' + CloudAccountIDKey = attribute.Key("cloud.account.id") + // The geographical region the resource is running. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-central1', 'us-east-1' + // Note: Refer to your provider's docs to see the available regions, for example + // [Alibaba Cloud regions](https://www.alibabacloud.com/help/doc- + // detail/40654.htm), [AWS regions](https://aws.amazon.com/about-aws/global- + // infrastructure/regions_az/), [Azure regions](https://azure.microsoft.com/en- + // us/global-infrastructure/geographies/), [Google Cloud + // regions](https://cloud.google.com/about/locations), or [Tencent Cloud + // regions](https://intl.cloud.tencent.com/document/product/213/6091). + CloudRegionKey = attribute.Key("cloud.region") + // Cloud regions often have multiple, isolated locations known as zones to + // increase availability. Availability zone represents the zone where the resource + // is running. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-east-1c' + // Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud. + CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone") + // The cloud platform in use. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: The prefix of the service SHOULD match the one specified in + // `cloud.provider`. + CloudPlatformKey = attribute.Key("cloud.platform") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") + // IBM Cloud + CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud") + // Tencent Cloud + CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // Red Hat OpenShift on Alibaba Cloud + CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // AWS App Runner + CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner") + // Red Hat OpenShift on AWS (ROSA) + CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Azure Red Hat OpenShift + CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift") + // Google 
Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") + // Red Hat OpenShift on Google Cloud + CloudPlatformGoogleCloudOpenshift = CloudPlatformKey.String("google_cloud_openshift") + // Red Hat OpenShift on IBM Cloud + CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift") + // Tencent Cloud Cloud Virtual Machine (CVM) + CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm") + // Tencent Cloud Elastic Kubernetes Service (EKS) + CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks") + // Tencent Cloud Serverless Cloud Function (SCF) + CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf") +) + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws. + // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo + // perguide/clusters.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l + // aunch_types.html) for an ECS task. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates + // t/developerguide/task_definitions.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + // The task definition family this task definition is a member of. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + // The revision for this task definition. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // The ARN of an EKS cluster. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// Resources specific to Amazon Web Services. +const ( + // The name(s) of the AWS log group(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each write + // to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + // The Amazon Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- + // access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + // The name(s) of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + // The ARN(s) of the AWS log stream(s). + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- + // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- + // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain + // several log streams, so these ARNs necessarily identify both a log group and a + // log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") +) + +// A container instance. +const ( + // Container name used by container runtime. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + // Container ID. Usually a UUID, as for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container- + // identification). The UUID might be abbreviated. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + // The container runtime managing this container. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") + // Name of the image the container was built on. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + // Container image tag. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + ContainerImageTagKey = attribute.Key("container.image.tag") +) + +// The software deployment. 
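A minimal sketch of how a few of the resource attribute keys defined so far in this file (cloud, container) might be combined into an SDK resource; the deployment, device, and FaaS sections continue below. The sdk/resource call and the SchemaURL constant (exported elsewhere in this semconv package) are assumptions for illustration, and the concrete values mirror the examples given in the comments above.

```go
// Illustrative sketch only: builds an SDK resource from a few of the keys
// defined in this file. Values mirror the documented examples.
package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
)

func exampleResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL, // schema URL constant exported elsewhere in this package
		semconv.CloudProviderAWS,
		semconv.CloudPlatformAWSEKS,
		semconv.CloudRegionKey.String("us-east-1"),
		semconv.CloudAvailabilityZoneKey.String("us-east-1c"),
		semconv.ContainerNameKey.String("opentelemetry-autoconf"),
		semconv.ContainerImageTagKey.String("0.1"),
	)
}
```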
+const ( + // Name of the [deployment + // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'staging', 'production' + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// The device on which the process represented by this resource is running. +const ( + // A unique identifier representing the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values outlined + // below. This value is not an advertising identifier and MUST NOT be used as + // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id + // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden + // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the + // Firebase Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on best + // practices and exact implementation details. Caution should be taken when + // storing personal data or anything which can identify a user. GDPR and data + // protection laws may apply, ensure you do your own due diligence. + DeviceIDKey = attribute.Key("device.id") + // The model identifier for the device + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version of the + // model identifier rather than the market or consumer-friendly name of the + // device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + // The marketing name for the device model + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of the + // device model rather than a machine readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") + // The name of the device manufacturer + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Apple', 'Samsung' + // Note: The Android OS provides this field via + // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER). + // iOS apps SHOULD hardcode the value `Apple`. + DeviceManufacturerKey = attribute.Key("device.manufacturer") +) + +// A serverless instance. +const ( + // The name of the single function that this runtime instance executes. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function', 'myazurefunctionapp/some-function-name' + // Note: This is the name of the function as configured/deployed on the FaaS + // platform and is usually different from the name of the callback + // function (which may be stored in the + // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span- + // general.md#source-code-attributes) + // span attributes). + + // For some cloud providers, the above definition is ambiguous. 
The following + // definition of function name MUST be used for this attribute + // (and consequently the span name) for the listed cloud providers/products: + + // * **Azure:** The full name `/`, i.e., function app name + // followed by a forward slash followed by the function name (this form + // can also be seen in the resource JSON for the function). + // This means that a span attribute MUST be used, as an Azure function + // app can host multiple functions that would usually share + // a TracerProvider (see also the `faas.id` attribute). + FaaSNameKey = attribute.Key("faas.name") + // The unique ID of the single function that this runtime instance executes. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' + // Note: On some cloud providers, it may not be possible to determine the full ID + // at startup, + // so consider setting `faas.id` as a span attribute instead. + + // The exact value to use for `faas.id` depends on the cloud provider: + + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and- + // namespaces.html). + // Take care not to use the "invoked ARN" directly but replace any + // [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration- + // aliases.html) + // with the resolved function version, as the same runtime instance may be + // invokable with + // multiple different aliases. + // * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full- + // resource-names) + // * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en- + // us/rest/api/resources/resources/get-by-id) of the invoked function, + // *not* the function app, having the form + // `/subscriptions//resourceGroups//providers/Microsoft.We + // b/sites//functions/`. + // This means that a span attribute MUST be used, as an Azure function app can + // host multiple functions that would usually share + // a TracerProvider. + FaaSIDKey = attribute.Key("faas.id") + // The immutable version of the function being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration- + // versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run:** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env- + // var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") + // The execution environment ID as a string, that will be potentially reused for + // other invocations to the same function/function version. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + // The amount of memory available to the serverless function in MiB. 
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 128 + // Note: It's recommended to set this attribute since e.g. too little memory can + // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, + // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this + // information. + FaaSMaxMemoryKey = attribute.Key("faas.max_memory") +) + +// A host is defined as a general computing instance. +const ( + // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud + // provider. For non-containerized Linux systems, the `machine-id` located in + // `/etc/machine-id` or `/var/lib/dbus/machine-id` may be used. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'fdbf79e8af94cb7f9e8df36789187052' + HostIDKey = attribute.Key("host.id") + // Name of the host. On Unix systems, it may contain what the hostname command + // returns, or the fully qualified hostname, or another name specified by the + // user. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + // Type of host. For Cloud, this must be the machine type. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") + // The CPU architecture the host system is running on. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + HostArchKey = attribute.Key("host.arch") + // Name of the VM image or OS install the host was instantiated from. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + // VM image ID. For Cloud, this value is from the provider. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + // The version string of the VM image as defined in [Version + // Attributes](README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // IBM z/Architecture + HostArchS390x = HostArchKey.String("s390x") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// A Kubernetes Cluster. +const ( + // The name of the cluster. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") +) + +// A Kubernetes Node object. +const ( + // The name of the Node. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + // The UID of the Node. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") +) + +// A Kubernetes Namespace. +const ( + // The name of the namespace that the pod is running in. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") +) + +// A Kubernetes Pod object. +const ( + // The UID of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + // The name of the Pod. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") +) + +// A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // The name of the Container from Pod specification, must be unique within a Pod. + // Container runtime usually uses different globally unique name + // (`container.name`). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") + // Number of times the container was restarted. This attribute can be used to + // identify a particular container (running or stopped) within a container spec. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count") +) + +// A Kubernetes ReplicaSet object. +const ( + // The UID of the ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + // The name of the ReplicaSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") +) + +// A Kubernetes Deployment object. +const ( + // The UID of the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + // The name of the Deployment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") +) + +// A Kubernetes StatefulSet object. +const ( + // The UID of the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + // The name of the StatefulSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") +) + +// A Kubernetes DaemonSet object. +const ( + // The UID of the DaemonSet. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + // The name of the DaemonSet. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") +) + +// A Kubernetes Job object. +const ( + // The UID of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + // The name of the Job. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") +) + +// A Kubernetes CronJob object. +const ( + // The UID of the CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + // The name of the CronJob. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") +) + +// The operating system (OS) on which the process represented by this resource is running. +const ( + // The operating system type. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + OSTypeKey = attribute.Key("os.type") + // Human readable (not intended to be parsed) OS version information, like e.g. + // reported by `ver` or `lsb_release -a` commands. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' + OSDescriptionKey = attribute.Key("os.description") + // Human readable operating system name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + // The version string of the operating system as defined in [Version + // Attributes](../../resource/semantic_conventions/README.md#version-attributes). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // SunOS, Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// An operating system process. +const ( + // Process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + // Parent Process identifier (PID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 111 + ProcessParentPIDKey = attribute.Key("process.parent_pid") + // The name of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of + // `GetProcessImageFileNameW`. 
+ // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes below.) + // Stability: stable + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + // The full path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes below.) + // Stability: stable + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + // The command used to launch the process (i.e. the command name). On Linux based + // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, + // can be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes below.) + // Stability: stable + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + // The full command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not + // set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // RequirementLevel: ConditionallyRequired (See alternative attributes below.) + // Stability: stable + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + // All the command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited strings + // extracted from `proc/[pid]/cmdline`. For libc-based executables, this would be + // the full argv vector passed to `main`. + // + // Type: string[] + // RequirementLevel: ConditionallyRequired (See alternative attributes below.) + // Stability: stable + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + // The username of the user that owns the process. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") +) + +// The single (language) runtime instance which is monitored. +const ( + // The name of the runtime of this process. For compiled native binaries, this + // SHOULD be the name of the compiler. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + // The version of the runtime of this process, as returned by the runtime without + // modification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + // An additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") +) + +// A service instance. +const ( + // Logical name of the service. 
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled services. If + // the value was not specified, SDKs MUST fallback to `unknown_service:` + // concatenated with [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, the + // value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + // A namespace for `service.name`. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group of + // services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` is + // expected to be unique for all services that have no explicit namespace defined + // (so the empty/unspecified namespace is simply one more valid namespace). Zero- + // length namespace string is assumed equal to unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + // The string ID of the service instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be globally + // unique). The ID helps to distinguish instances of the same service that exist + // at the same time (e.g. instances of a horizontally scaled service). It is + // preferable for the ID to be persistent and stay the same for the lifetime of + // the service instance, however it is acceptable that the ID is ephemeral and + // changes during important lifetime events for the service (e.g. service + // restarts). If the service has no inherent unique ID that can be used as the + // value of this attribute it is recommended to generate a random Version 1 or + // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + // The version string of the service API or implementation. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2.0.0' + ServiceVersionKey = attribute.Key("service.version") +) + +// The telemetry SDK used to capture data recorded by the instrumentation libraries. +const ( + // The name of the telemetry SDK as defined above. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'opentelemetry' + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + // The language of the telemetry SDK. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + // The version string of the telemetry SDK. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + // The version string of the auto instrumentation agent, if used. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.2.3' + TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") + // swift + TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift") +) + +// Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. +const ( + // The name of the web engine. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + // The version of the web engine. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") + // Additional description of the web engine (e.g. detailed version and edition + // information). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") +) + +// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's concepts. +const ( + // The name of the instrumentation scope - (`InstrumentationScope.Name` in OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'io.opentelemetry.contrib.mongodb' + OtelScopeNameKey = attribute.Key("otel.scope.name") + // The version of the instrumentation scope - (`InstrumentationScope.Version` in + // OTLP). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0.0' + OtelScopeVersionKey = attribute.Key("otel.scope.version") +) + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Scope's concepts. +const ( + // Deprecated, use the `otel.scope.name` attribute. + // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: 'io.opentelemetry.contrib.mongodb' + OtelLibraryNameKey = attribute.Key("otel.library.name") + // Deprecated, use the `otel.scope.version` attribute. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: deprecated + // Examples: '1.0.0' + OtelLibraryVersionKey = attribute.Key("otel.library.version") +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go new file mode 100644 index 0000000000000..42fc525d1657d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/1.17.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go new file mode 100644 index 0000000000000..01e5f072a2184 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go @@ -0,0 +1,1892 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0" + +import "go.opentelemetry.io/otel/attribute" + +// This document defines the shared attributes used to report a single exception associated with a span or log. +const ( + // The type of the exception (its fully-qualified class name, if applicable). The + // dynamic type of the exception should be preferred over the static type in + // languages that support it. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'java.net.ConnectException', 'OSError' + ExceptionTypeKey = attribute.Key("exception.type") + // The exception message. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + // A stacktrace as a string in the natural representation for the language + // runtime. The representation is to be determined and documented by each language + // SIG. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") +) + +// This document defines attributes for Events represented using Log Records. +const ( + // The name identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'click', 'exception' + EventNameKey = attribute.Key("event.name") + // The domain identifies the business context for the events. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: Events across different domains may have same `event.name`, yet be + // unrelated events. + EventDomainKey = attribute.Key("event.domain") +) + +var ( + // Events from browser apps + EventDomainBrowser = EventDomainKey.String("browser") + // Events from mobile apps + EventDomainDevice = EventDomainKey.String("device") + // Events from Kubernetes + EventDomainK8S = EventDomainKey.String("k8s") +) + +// Span attributes used by AWS Lambda (in addition to general `faas` attributes). +const ( + // The full invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the `/runtime/invocation/next` + // applicable). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `faas.id` if an alias is involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// This document defines attributes for CloudEvents. CloudEvents is a specification on how to define event data in a standard way. These attributes can be attached to spans when performing operations with CloudEvents, regardless of the protocol being used. +const ( + // The [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec + // .md#id) uniquely identifies the event. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001' + CloudeventsEventIDKey = attribute.Key("cloudevents.event_id") + // The [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.m + // d#source-1) identifies the context in which an event happened. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://github.com/cloudevents', '/cloudevents/spec/pull/123', 'my- + // service' + CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source") + // The [version of the CloudEvents specification](https://github.com/cloudevents/s + // pec/blob/v1.0.2/cloudevents/spec.md#specversion) which the event uses. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '1.0' + CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version") + // The [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/sp + // ec.md#type) contains a value describing the type of event related to the + // originating occurrence. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.github.pull_request.opened', 'com.example.object.deleted.v2' + CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type") + // The [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec. + // md#subject) of the event in the context of the event producer (identified by + // source). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'mynewfile.jpg' + CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject") +) + +// This document defines semantic conventions for the OpenTracing Shim +const ( + // Parent-child Reference type + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: The causal relationship between a child Span and a parent Span. + OpentracingRefTypeKey = attribute.Key("opentracing.ref_type") +) + +var ( + // The parent Span depends on the child Span in some capacity + OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of") + // The parent Span does not depend in any way on the result of the child Span + OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from") +) + +// This document defines the attributes used to perform database client calls. +const ( + // An identifier for the database management system (DBMS) product being used. See + // below for a list of well-known identifiers. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + DBSystemKey = attribute.Key("db.system") + // The connection string used to connect to the database. It is recommended to + // remove embedded credentials. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + DBConnectionStringKey = attribute.Key("db.connection_string") + // Username for accessing the database. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'readonly_user', 'reporting_user' + DBUserKey = attribute.Key("db.user") + // The fully-qualified class name of the [Java Database Connectivity + // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver + // used to connect. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") + // This attribute is used to report the name of the database being accessed. For + // commands that switch the database, this should be set to the target database + // (even if the command fails). + // + // Type: string + // RequirementLevel: ConditionallyRequired (If applicable.) + // Stability: stable + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called "schema + // name". In case there are multiple layers that could be considered for database + // name (e.g. Oracle instance name and schema name), the database name to be used + // is the more specific layer (e.g. Oracle schema name). + DBNameKey = attribute.Key("db.name") + // The database statement being executed. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If applicable and not explicitly + // disabled via instrumentation configuration.) 
+ // Stability: stable + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + // Note: The value may be sanitized to exclude sensitive information. + DBStatementKey = attribute.Key("db.statement") + // The name of the operation being executed, e.g. the [MongoDB command + // name](https://docs.mongodb.com/manual/reference/command/#database-operations) + // such as `findAndModify`, or the SQL keyword. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If `db.statement` is not applicable.) + // Stability: stable + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to attempt any + // client-side parsing of `db.statement` just to get this property, but it should + // be set if the operation name is provided by the library being instrumented. If + // the SQL statement has an ambiguous operation, or performs more than one + // operation, this value may be omitted. + DBOperationKey = attribute.Key("db.operation") +) + +var ( + // Some other SQL database. Fallback only. See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + 
DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") + // OpenSearch + DBSystemOpensearch = DBSystemKey.String("opensearch") + // ClickHouse + DBSystemClickhouse = DBSystemKey.String("clickhouse") +) + +// Connection-level attributes for Microsoft SQL Server +const ( + // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en- + // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) + // connecting to. This name is used to determine the port of a named instance. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MSSQLSERVER' + // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer + // required (but still recommended if non-standard). + DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") +) + +// Call-level attributes for Cassandra +const ( + // The fetch size used for paging, i.e. how many rows will be returned at once. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + // The consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra- + // oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + // The name of the primary table that the operation is acting upon, including the + // keyspace name (if applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra rather + // than sql. It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting upon an + // anonymous table, or more than one table, this value MUST NOT be set. + DBCassandraTableKey = attribute.Key("db.cassandra.table") + // Whether or not the query is idempotent. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + // The number of times a query was speculatively executed. Not set or `0` if the + // query was not executed speculatively. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") + // The ID of the coordinating node for a query. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + // The data center of the coordinating node for a query. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'us-west-2' + DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc") +) + +var ( + // all + DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all") + // each_quorum + DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum") + // quorum + DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum") + // local_quorum + DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum") + // one + DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one") + // two + DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two") + // three + DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three") + // local_one + DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one") + // any + DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any") + // serial + DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial") + // local_serial + DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial") +) + +// Call-level attributes for Redis +const ( + // The index of the database being accessed as used in the [`SELECT` + // command](https://redis.io/commands/select), provided as an integer. To be used + // instead of the generic `db.name` attribute. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If other than the default database + // (`0`).) + // Stability: stable + // Examples: 0, 1, 15 + DBRedisDBIndexKey = attribute.Key("db.redis.database_index") +) + +// Call-level attributes for MongoDB +const ( + // The collection being accessed within the database stated in `db.name`. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'customers', 'products' + DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection") +) + +// Call-level attributes for SQL databases +const ( + // The name of the primary table that the operation is acting upon, including the + // database name (if applicable). + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'public.users', 'customers' + // Note: It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting upon an + // anonymous table, or more than one table, this value MUST NOT be set. + DBSQLTableKey = attribute.Key("db.sql.table") +) + +// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's concepts. +const ( + // Name of the code, either "OK" or "ERROR". MUST NOT be set if the status code is + // UNSET. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + OtelStatusCodeKey = attribute.Key("otel.status_code") + // Description of the Status if it has a value, otherwise not set. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'resource not found' + OtelStatusDescriptionKey = attribute.Key("otel.status_description") +) + +var ( + // The operation has been validated by an Application developer or Operator to have completed successfully + OtelStatusCodeOk = OtelStatusCodeKey.String("OK") + // The operation contains an error + OtelStatusCodeError = OtelStatusCodeKey.String("ERROR") +) + +// This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. +const ( + // Type of the trigger which caused this function execution. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: For the server/consumer span on the incoming side, + // `faas.trigger` MUST be set. + + // Clients invoking FaaS instances usually cannot set `faas.trigger`, + // since they would typically need to look in the payload to determine + // the event type. If clients set it, it should be the same as the + // trigger that corresponding incoming would have (i.e., this has + // nothing to do with the underlying transport used to make the API + // call to invoke the lambda, which is often HTTP). + FaaSTriggerKey = attribute.Key("faas.trigger") + // The execution ID of the current function execution. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSExecutionKey = attribute.Key("faas.execution") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. +const ( + // The name of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos + // DB to the database name. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + // Describes the type of the operation that was performed on the data. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + // A string containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed + // in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + // The document name/table subjected to the operation. For example, in Cloud + // Storage or S3 is the name of the file, and in Cosmos DB the table name. 
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // A string containing the function invocation time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed + // in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + // A string containing the schedule period as [Cron Expression](https://docs.oracl + // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") +) + +// Contains additional attributes for incoming FaaS spans. +const ( + // A boolean that is true if the serverless function is executed for the first + // time (aka cold-start). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// Contains additional attributes for outgoing FaaS spans. +const ( + // The name of the invoked function. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked + // function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + // The cloud provider of the invoked function. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked + // function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + // The cloud region of the invoked function. + // + // Type: string + // RequirementLevel: ConditionallyRequired (For some cloud providers, like AWS or + // GCP, the region in which a function is hosted is essential to uniquely identify + // the function and also part of its endpoint. Since it's part of the endpoint + // being called, the region is always known to clients. In these cases, + // `faas.invoked_region` MUST be set accordingly. If the region is unknown to the + // client or not required for identifying the invoked function, setting + // `faas.invoked_region` is optional.) + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked + // function. 
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") + // Tencent Cloud + FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud") +) + +// These attributes may be used for any network related operation. +const ( + // Transport protocol used. See note below. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + NetTransportKey = attribute.Key("net.transport") + // Application layer protocol used. The value SHOULD be normalized to lowercase. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'amqp', 'http', 'mqtt' + NetAppProtocolNameKey = attribute.Key("net.app.protocol.name") + // Version of the application layer protocol used. See note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '3.1.1' + // Note: `net.app.protocol.version` refers to the version of the protocol used and + // might be different from the protocol client's version. If the HTTP client used + // has a version of `0.27.2`, but sends HTTP version `1.1`, this attribute should + // be set to `1.1`. + NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version") + // Remote socket peer name. + // + // Type: string + // RequirementLevel: Recommended (If available and different from `net.peer.name` + // and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 'proxy.example.com' + NetSockPeerNameKey = attribute.Key("net.sock.peer.name") + // Remote socket peer address: IPv4 or IPv6 for internet protocols, path for local + // communication, [etc](https://man7.org/linux/man- + // pages/man7/address_families.7.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '127.0.0.1', '/tmp/mysql.sock' + NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr") + // Remote socket peer port. + // + // Type: int + // RequirementLevel: Recommended (If defined for the address family and if + // different than `net.peer.port` and if `net.sock.peer.addr` is set.) + // Stability: stable + // Examples: 16456 + NetSockPeerPortKey = attribute.Key("net.sock.peer.port") + // Protocol [address family](https://man7.org/linux/man- + // pages/man7/address_families.7.html) which is used for communication. + // + // Type: Enum + // RequirementLevel: ConditionallyRequired (If different than `inet` and if any of + // `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers of telemetry + // SHOULD accept both IPv4 and IPv6 formats for the address in + // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support + // instrumentations that follow previous versions of this document.) + // Stability: stable + // Examples: 'inet6', 'bluetooth' + NetSockFamilyKey = attribute.Key("net.sock.family") + // Logical remote hostname, see note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'example.com' + // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an extra + // DNS lookup. 
+ NetPeerNameKey = attribute.Key("net.peer.name") + // Logical remote port number + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 80, 8080, 443 + NetPeerPortKey = attribute.Key("net.peer.port") + // Logical local hostname or similar, see note below. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'localhost' + NetHostNameKey = attribute.Key("net.host.name") + // Logical local port number, preferably the one that the peer used to connect + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 8080 + NetHostPortKey = attribute.Key("net.host.port") + // Local socket address. Useful in case of a multi-IP host. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '192.168.0.1' + NetSockHostAddrKey = attribute.Key("net.sock.host.addr") + // Local socket port number. + // + // Type: int + // RequirementLevel: Recommended (If defined for the address family and if + // different than `net.host.port` and if `net.sock.host.addr` is set.) + // Stability: stable + // Examples: 35555 + NetSockHostPortKey = attribute.Key("net.sock.host.port") + // The internet connection type currently being used by the host. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'wifi' + NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") + // This describes more details regarding the connection.type. It may be the type + // of cell technology connection, but it could be used for describing details + // about a wifi connection. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'LTE' + NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") + // The name of the mobile carrier. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'sprint' + NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") + // The mobile carrier country code. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '310' + NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") + // The mobile carrier network code. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '001' + NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") + // The ISO 3166-1 alpha-2 2-character country code associated with the mobile + // carrier network. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'DE' + NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") +) + +var ( + // ip_tcp + NetTransportTCP = NetTransportKey.String("ip_tcp") + // ip_udp + NetTransportUDP = NetTransportKey.String("ip_udp") + // Named or anonymous pipe. 
See note below + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + NetTransportOther = NetTransportKey.String("other") +) + +var ( + // IPv4 address + NetSockFamilyInet = NetSockFamilyKey.String("inet") + // IPv6 address + NetSockFamilyInet6 = NetSockFamilyKey.String("inet6") + // Unix domain socket path + NetSockFamilyUnix = NetSockFamilyKey.String("unix") +) + +var ( + // wifi + NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") + // wired + NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") + // cell + NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") + // unavailable + NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") + // unknown + NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") +) + +var ( + // GPRS + NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") + // EDGE + NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") + // UMTS + NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") + // CDMA + NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") + // HSPA + NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") + // IDEN + NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") + // EVDO Rev. B + NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") + // LTE + NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") + // EHRPD + NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") + // GSM + NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") +) + +// Operations that access some remote service. +const ( + // The [`service.name`](../../resource/semantic_conventions/README.md#service) of + // the remote service. SHOULD be equal to the actual `service.name` resource + // attribute of the remote service if any. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// These attributes may be used for any operation with an authenticated and/or authorized enduser. 
+const ( + // Username or client_id extracted from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the + // inbound request from outside the system. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + // Actual/assumed role the client is making the request under extracted from token + // or application security context. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + // Scopes or granted authorities the client currently possesses extracted from + // token or application security context. The value would come from the scope + // associated with an [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value + // in a [SAML 2.0 Assertion](http://docs.oasis- + // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// These attributes may be used for any operation to store information about a thread that started a span. +const ( + // Current "managed" thread ID (as opposed to OS thread ID). + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + // Current thread name. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// These attributes allow to report this unit of code and therefore to provide more context about the span. +const ( + // The method or function name, or equivalent (usually rightmost part of the code + // unit's name). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + // The "namespace" within which `code.function` is defined. Usually the qualified + // class or module name, such that `code.namespace` + some separator + + // `code.function` form a unique identifier for the code unit. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + // The source code file name that identifies the code unit as uniquely as possible + // (preferably an absolute file path). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + // The line number in `code.filepath` best representing the operation. It SHOULD + // point within the code unit named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") + // The column number in `code.filepath` best representing the operation. It SHOULD + // point within the code unit named in `code.function`. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 16 + CodeColumnKey = attribute.Key("code.column") +) + +// This document defines semantic conventions for HTTP client and server Spans. +const ( + // HTTP request method. 
+ // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + HTTPMethodKey = attribute.Key("http.method") + // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // RequirementLevel: ConditionallyRequired (If and only if one was received/sent.) + // Stability: stable + // Examples: 200 + HTTPStatusCodeKey = attribute.Key("http.status_code") + // Kind of HTTP protocol used. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP` + // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed. + HTTPFlavorKey = attribute.Key("http.flavor") + // Value of the [HTTP User-Agent](https://www.rfc- + // editor.org/rfc/rfc9110.html#field.user-agent) header sent by the client. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' + HTTPUserAgentKey = attribute.Key("http.user_agent") + // The size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content- + // length) header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + // The size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content- + // length) header. For requests using transport encoding, this should be the + // compressed size. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 3495 + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") +) + +var ( + // HTTP/1.0 + HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") + // HTTP/1.1 + HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") + // HTTP/2 + HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") + // HTTP/3 + HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0") + // SPDY protocol + HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") + // QUIC protocol + HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") +) + +// Semantic Convention for HTTP Client +const ( + // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. + // Usually the fragment is not transmitted over HTTP, but if it is known, it + // should be included nevertheless. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Note: `http.url` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case the attribute's + // value should be `https://www.example.com/`. + HTTPURLKey = attribute.Key("http.url") + // The ordinal number of request resending attempt (for any reason, including + // redirects). + // + // Type: int + // RequirementLevel: Recommended (if and only if request was retried.) + // Stability: stable + // Examples: 3 + // Note: The resend count SHOULD be updated each time an HTTP request gets resent + // by the client, regardless of what was the cause of the resending (e.g. 
+ // redirection, authorization failure, 503 Server Unavailable, network issues, or + // any other). + HTTPResendCountKey = attribute.Key("http.resend_count") +) + +// Semantic Convention for HTTP Server +const ( + // The URI scheme identifying the used protocol. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'http', 'https' + HTTPSchemeKey = attribute.Key("http.scheme") + // The full request target as passed in a HTTP request line or equivalent. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: '/path/12314/?q=ddds' + HTTPTargetKey = attribute.Key("http.target") + // The matched route (path template in the format used by the respective server + // framework). See note below + // + // Type: string + // RequirementLevel: ConditionallyRequired (If and only if it's available) + // Stability: stable + // Examples: '/users/:userID?', '{controller}/{action}/{id?}' + // Note: 'http.route' MUST NOT be populated when this is not supported by the HTTP + // server framework as the route attribute should have low-cardinality and the URI + // path can NOT substitute it. + HTTPRouteKey = attribute.Key("http.route") + // The IP address of the original client behind all proxies, if known (e.g. from + // [X-Forwarded-For](https://developer.mozilla.org/en- + // US/docs/Web/HTTP/Headers/X-Forwarded-For)). + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '83.164.160.102' + // Note: This is not necessarily the same as `net.sock.peer.addr`, which would + // identify the network-level peer, which may be a proxy. + + // This attribute should be set when a source of information different + // from the one used for `net.sock.peer.addr`, is available even if that other + // source just confirms the same value as `net.sock.peer.addr`. + // Rationale: For `net.sock.peer.addr`, one typically does not know if it + // comes from a proxy, reverse proxy, or the actual client. Setting + // `http.client_ip` when it's the same as `net.sock.peer.addr` means that + // one is at least somewhat confident that the address is not that of + // the closest proxy. + HTTPClientIPKey = attribute.Key("http.client_ip") +) + +// Attributes that exist for multiple DynamoDB request types. +const ( + // The keys in the `RequestItems` object field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + // The JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { + // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + // The JSON-serialized value of the `ItemCollectionMetrics` response field. 
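// Illustrative sketch (hypothetical usage): the http.* keys above applied on the
// server side, assuming a span, an incoming *http.Request and a matched route;
// requires net/http and go.opentelemetry.io/otel/trace.
func annotateHTTPServerSpan(span trace.Span, req *http.Request, route string, status int) {
	scheme := "http"
	if req.TLS != nil {
		scheme = "https"
	}
	span.SetAttributes(
		HTTPMethodKey.String(req.Method),
		HTTPSchemeKey.String(scheme),
		HTTPTargetKey.String(req.RequestURI),
		HTTPStatusCodeKey.Int(status),
	)
	if route != "" { // only when the framework exposes a low-cardinality template
		span.SetAttributes(HTTPRouteKey.String(route))
	}
	if xff := req.Header.Get("X-Forwarded-For"); xff != "" {
		span.SetAttributes(HTTPClientIPKey.String(xff)) // ideally only the left-most address
	}
}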
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. + // + // Type: double + // RequirementLevel: Optional + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + // The value of the `ConsistentRead` request parameter. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + // The value of the `ProjectionExpression` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, + // ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + // The value of the `Limit` request parameter. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + // The value of the `AttributesToGet` request parameter. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + // The value of the `IndexName` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + // The value of the `Select` request parameter. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") +) + +// DynamoDB.CreateTable +const ( + // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request + // field + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": + // number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request + // field. 
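// Illustrative sketch (hypothetical usage): common aws.dynamodb.* request
// attributes above attached to a client span around a Query call; tableName,
// indexName, projection and limit are placeholders.
func annotateDynamoDBQuery(span trace.Span, tableName, indexName, projection string, limit int) {
	span.SetAttributes(
		AWSDynamoDBTableNamesKey.StringSlice([]string{tableName}),
		AWSDynamoDBIndexNameKey.String(indexName),
		AWSDynamoDBProjectionKey.String(projection),
		AWSDynamoDBLimitKey.Int(limit),
		AWSDynamoDBConsistentReadKey.Bool(false),
	)
}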
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes":
+	// number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string",
+	// "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ],
+	// "ProjectionType": "string" } }'
+	AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// DynamoDB.ListTables
+const (
+	// The value of the `ExclusiveStartTableName` request parameter.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'Users', 'CatsTable'
+	AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+	// The number of items in the `TableNames` response parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 20
+	AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// DynamoDB.Query
+const (
+	// The value of the `ScanIndexForward` request parameter.
+	//
+	// Type: boolean
+	// RequirementLevel: Optional
+	// Stability: stable
+	AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+)
+
+// DynamoDB.Scan
+const (
+	// The value of the `Segment` request parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 10
+	AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+	// The value of the `TotalSegments` request parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 100
+	AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+	// The value of the `Count` response parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 10
+	AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+	// The value of the `ScannedCount` response parameter.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 50
+	AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+)
+
+// DynamoDB.UpdateTable
+const (
+	// The JSON-serialized value of each item in the `AttributeDefinitions` request
+	// field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+	AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+	// The JSON-serialized value of each item in the `GlobalSecondaryIndexUpdates`
+	// request field.
+	//
+	// Type: string[]
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+	// "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+	// "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+	// "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits":
+	// number } }'
+	AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// This document defines semantic conventions to apply when instrumenting the GraphQL implementation. They map GraphQL operations to attributes on a Span.
+const (
+	// The name of the operation being executed.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'findBookByID'
+	GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+	// The type of the operation being executed.
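// Illustrative sketch (hypothetical usage): recording a GraphQL query execution
// with the graphql.* keys in this block; the document value may need to be
// sanitized before it is recorded.
func annotateGraphQLQuery(span trace.Span, operationName, document string) {
	span.SetAttributes(
		GraphqlOperationNameKey.String(operationName),
		GraphqlOperationTypeQuery, // enum value declared below
		GraphqlDocumentKey.String(document),
	)
}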
+ // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query', 'mutation', 'subscription' + GraphqlOperationTypeKey = attribute.Key("graphql.operation.type") + // The GraphQL document being executed. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'query findBookByID { bookByID(id: ?) { name } }' + // Note: The value may be sanitized to exclude sensitive information. + GraphqlDocumentKey = attribute.Key("graphql.document") +) + +var ( + // GraphQL query + GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query") + // GraphQL mutation + GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation") + // GraphQL subscription + GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription") +) + +// Semantic convention describing per-message attributes populated on messaging spans or links. +const ( + // A value used by the messaging system as an identifier for the message, + // represented as a string. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message.id") + // The [conversation ID](#conversations) identifying the conversation to which the + // message belongs, represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyConversationID' + MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id") + // The (uncompressed) size of the message payload in bytes. Also use this + // attribute if it is unknown whether the compressed or uncompressed payload size + // is reported. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2738 + MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes") + // The compressed size of the message payload in bytes. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2048 + MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes") +) + +// Semantic convention for attributes that describe messaging destination on broker +const ( + // The message destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Destination name SHOULD uniquely identify a specific queue, topic or + // other entity within the broker. If + // the broker does not have such notion, the destination name SHOULD uniquely + // identify the broker. + MessagingDestinationNameKey = attribute.Key("messaging.destination.name") + // The kind of message destination + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationKindKey = attribute.Key("messaging.destination.kind") + // Low cardinality representation of the messaging destination name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Destination names could be constructed from templates. An example would + // be a destination name involving a user name or product id. Although the + // destination name in this case is of high cardinality, the underlying template + // is of low cardinality and can be effectively used for grouping and aggregation. 
+ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template") + // A boolean that is true if the message destination is temporary and might not + // exist anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary") + // A boolean that is true if the message destination is anonymous (could be + // unnamed or have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous") +) + +var ( + // A message sent to a queue + MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") + // A message sent to a topic + MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") +) + +// Semantic convention for attributes that describe messaging source on broker +const ( + // The message source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + // Note: Source name SHOULD uniquely identify a specific queue, topic, or other + // entity within the broker. If + // the broker does not have such notion, the source name SHOULD uniquely identify + // the broker. + MessagingSourceNameKey = attribute.Key("messaging.source.name") + // The kind of message source + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingSourceKindKey = attribute.Key("messaging.source.kind") + // Low cardinality representation of the messaging source name + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '/customers/{customerID}' + // Note: Source names could be constructed from templates. An example would be a + // source name involving a user name or product id. Although the source name in + // this case is of high cardinality, the underlying template is of low cardinality + // and can be effectively used for grouping and aggregation. + MessagingSourceTemplateKey = attribute.Key("messaging.source.template") + // A boolean that is true if the message source is temporary and might not exist + // anymore after messages are processed. + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary") + // A boolean that is true if the message source is anonymous (could be unnamed or + // have auto-generated name). + // + // Type: boolean + // RequirementLevel: Optional + // Stability: stable + MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous") +) + +var ( + // A message received from a queue + MessagingSourceKindQueue = MessagingSourceKindKey.String("queue") + // A message received from a topic + MessagingSourceKindTopic = MessagingSourceKindKey.String("topic") +) + +// This document defines general attributes used in messaging systems. +const ( + // A string identifying the messaging system. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS' + MessagingSystemKey = attribute.Key("messaging.system") + // A string identifying the kind of messaging operation as defined in the + // [Operation names](#operation-names) section above. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + // Note: If a custom value is used, it MUST be of low cardinality. 
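// Illustrative sketch (hypothetical usage): a producer span annotated with the
// general messaging.* and messaging.destination.* keys above; "kafka" and
// "orders" are example values only.
func annotatePublishSpan(span trace.Span) {
	span.SetAttributes(
		MessagingSystemKey.String("kafka"),
		MessagingDestinationNameKey.String("orders"),
		MessagingDestinationKindTopic, // enum value declared above
		MessagingOperationPublish,     // enum value declared below
	)
}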
+	MessagingOperationKey = attribute.Key("messaging.operation")
+	// The number of messages sent, received, or processed in the scope of the
+	// batching operation.
+	//
+	// Type: int
+	// RequirementLevel: ConditionallyRequired (If the span describes an operation on
+	// a batch of messages.)
+	// Stability: stable
+	// Examples: 0, 1, 2
+	// Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on spans
+	// that operate with a single message. When a messaging client library supports
+	// both batch and single-message API for the same operation, instrumentations
+	// SHOULD use `messaging.batch.message_count` for batching APIs and SHOULD NOT use
+	// it for single-message APIs.
+	MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+)
+
+var (
+	// publish
+	MessagingOperationPublish = MessagingOperationKey.String("publish")
+	// receive
+	MessagingOperationReceive = MessagingOperationKey.String("receive")
+	// process
+	MessagingOperationProcess = MessagingOperationKey.String("process")
+)
+
+// Semantic convention for a consumer of messages received from a messaging system
+const (
+	// The identifier for the consumer receiving a message. For Kafka, set it to
+	// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both are
+	// present, or only `messaging.kafka.consumer.group`. For brokers, such as
+	// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the
+	// message.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'mygroup - client-6'
+	MessagingConsumerIDKey = attribute.Key("messaging.consumer.id")
+)
+
+// Attributes for RabbitMQ
+const (
+	// RabbitMQ message routing key.
+	//
+	// Type: string
+	// RequirementLevel: ConditionallyRequired (If not empty.)
+	// Stability: stable
+	// Examples: 'myKey'
+	MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+)
+
+// Attributes for Apache Kafka
+const (
+	// Message keys in Kafka are used for grouping alike messages to ensure they're
+	// processed on the same partition. They differ from `messaging.message.id` in
+	// that they're not unique. If the key is `null`, the attribute MUST NOT be set.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'myKey'
+	// Note: If the key type is not string, its string representation has to be
+	// supplied for the attribute. If the key has no unambiguous, canonical string
+	// form, don't include its value.
+	MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+	// Name of the Kafka Consumer Group that is handling the message. Only applies to
+	// consumers, not producers.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'my-group'
+	MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+	// Client ID for the Consumer or Producer that is handling the message.
+	//
+	// Type: string
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 'client-5'
+	MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
+	// Partition the message is sent to.
+	//
+	// Type: int
+	// RequirementLevel: Optional
+	// Stability: stable
+	// Examples: 2
+	MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+	// Partition the message is received from.
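// Illustrative sketch (hypothetical usage): the Kafka-specific keys above on a
// producer ("publish") span; key, clientID and partition are placeholders.
func annotateKafkaPublish(span trace.Span, key, clientID string, partition int) {
	span.SetAttributes(
		MessagingKafkaMessageKeyKey.String(key), // omit entirely when the key is null
		MessagingKafkaClientIDKey.String(clientID),
		MessagingKafkaDestinationPartitionKey.Int(partition),
	)
}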
+ // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 2 + MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition") + // The offset of a record in the corresponding Kafka partition. + // + // Type: int + // RequirementLevel: Optional + // Stability: stable + // Examples: 42 + MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset") + // A boolean that is true if the message is a tombstone. + // + // Type: boolean + // RequirementLevel: ConditionallyRequired (If value is `true`. When missing, the + // value is assumed to be `false`.) + // Stability: stable + MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone") +) + +// Attributes for Apache RocketMQ +const ( + // Namespace of RocketMQ resources, resources in different namespaces are + // individual. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myNamespace' + MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace") + // Name of the RocketMQ producer/consumer group that is handling the message. The + // client type is identified by the SpanKind. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myConsumerGroup' + MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group") + // The unique identifier for each client. + // + // Type: string + // RequirementLevel: Required + // Stability: stable + // Examples: 'myhost@8742@s8083jm' + MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id") + // The timestamp in milliseconds that the delay message is expected to be + // delivered to consumer. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the message type is delay and delay + // time level is not specified.) + // Stability: stable + // Examples: 1665987217045 + MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp") + // The delay time level for delay message, which determines the message delay + // time. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If the message type is delay and + // delivery timestamp is not specified.) + // Stability: stable + // Examples: 3 + MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level") + // It is essential for FIFO message. Messages that belong to the same message + // group are always processed one by one within the same consumer group. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If the message type is FIFO.) + // Stability: stable + // Examples: 'myMessageGroup' + MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group") + // Type of message. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type") + // The secondary classifier of message besides topic. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'tagA' + MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag") + // Key(s) of message, another way to mark message besides message id. + // + // Type: string[] + // RequirementLevel: Optional + // Stability: stable + // Examples: 'keyA', 'keyB' + MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys") + // Model of message consumption. 
This only applies to consumer spans. + // + // Type: Enum + // RequirementLevel: Optional + // Stability: stable + MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model") +) + +var ( + // Normal message + MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal") + // FIFO message + MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo") + // Delay message + MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay") + // Transaction message + MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction") +) + +var ( + // Clustering consumption model + MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering") + // Broadcasting consumption model + MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting") +) + +// This document defines semantic conventions for remote procedure calls. +const ( + // A string identifying the remoting system. See below for a list of well-known + // identifiers. + // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCSystemKey = attribute.Key("rpc.system") + // The full (logical) name of the service being called, including its package + // name, if applicable. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing class. + // The `code.namespace` attribute may be used to store the latter (despite the + // attribute name, it may include a class name; e.g., class with method actually + // executing the call on the server side, RPC client stub class on the client + // side). + RPCServiceKey = attribute.Key("rpc.service") + // The name of the (logical) method being called, must be equal to the $method + // part in the span name. + // + // Type: string + // RequirementLevel: Recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the latter + // (e.g., method actually executing the call on the server side, RPC client stub + // method on the client side). + RPCMethodKey = attribute.Key("rpc.method") +) + +var ( + // gRPC + RPCSystemGRPC = RPCSystemKey.String("grpc") + // Java RMI + RPCSystemJavaRmi = RPCSystemKey.String("java_rmi") + // .NET WCF + RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf") + // Apache Dubbo + RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo") +) + +// Tech-specific attributes for gRPC. +const ( + // The [numeric status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC + // request. 
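// Illustrative sketch (hypothetical usage): the rpc.* keys above on a client
// span for a gRPC call, matching a span named "myservice.EchoService/exampleMethod".
func annotateGRPCClientSpan(span trace.Span) {
	span.SetAttributes(
		RPCSystemGRPC, // enum value declared above
		RPCServiceKey.String("myservice.EchoService"),
		RPCMethodKey.String("exampleMethod"),
	)
}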
+ // + // Type: Enum + // RequirementLevel: Required + // Stability: stable + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). +const ( + // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC + // 1.0 does not specify this, the value can be omitted. + // + // Type: string + // RequirementLevel: ConditionallyRequired (If other than the default version + // (`1.0`)) + // Stability: stable + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + // `id` property of request or response. Since protocol allows id to be int, + // string, `null` or missing (for notifications), value is expected to be cast to + // string for simplicity. Use empty string in case of `null` value. Omit entirely + // if this is a notification. + // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + // `error.code` property of response if it is an error response. + // + // Type: int + // RequirementLevel: ConditionallyRequired (If response is not successful.) + // Stability: stable + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + // `error.message` property of response if it is an error response. 
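// Illustrative sketch (hypothetical usage): recording the numeric gRPC status on
// a span, preferring the enum values above and falling back to the raw code.
func recordGRPCStatus(span trace.Span, code int) {
	switch code {
	case 0:
		span.SetAttributes(RPCGRPCStatusCodeOk)
	case 14:
		span.SetAttributes(RPCGRPCStatusCodeUnavailable)
	default:
		span.SetAttributes(RPCGRPCStatusCodeKey.Int(code))
	}
}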
+ // + // Type: string + // RequirementLevel: Optional + // Stability: stable + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/http.go index 9b430fac0c640..945c95a18ad58 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/http.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/http.go @@ -15,16 +15,12 @@ package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0" import ( - "fmt" - "net" "net/http" - "strconv" - "strings" - - "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/semconv/internal" + "go.opentelemetry.io/otel/trace" ) // HTTP scheme attributes. @@ -33,163 +29,60 @@ var ( HTTPSchemeHTTPS = HTTPSchemeKey.String("https") ) +var sc = &internal.SemanticConventions{ + EnduserIDKey: EnduserIDKey, + HTTPClientIPKey: HTTPClientIPKey, + HTTPFlavorKey: HTTPFlavorKey, + HTTPHostKey: HTTPHostKey, + HTTPMethodKey: HTTPMethodKey, + HTTPRequestContentLengthKey: HTTPRequestContentLengthKey, + HTTPRouteKey: HTTPRouteKey, + HTTPSchemeHTTP: HTTPSchemeHTTP, + HTTPSchemeHTTPS: HTTPSchemeHTTPS, + HTTPServerNameKey: HTTPServerNameKey, + HTTPStatusCodeKey: HTTPStatusCodeKey, + HTTPTargetKey: HTTPTargetKey, + HTTPURLKey: HTTPURLKey, + HTTPUserAgentKey: HTTPUserAgentKey, + NetHostIPKey: NetHostIPKey, + NetHostNameKey: NetHostNameKey, + NetHostPortKey: NetHostPortKey, + NetPeerIPKey: NetPeerIPKey, + NetPeerNameKey: NetPeerNameKey, + NetPeerPortKey: NetPeerPortKey, + NetTransportIP: NetTransportIP, + NetTransportOther: NetTransportOther, + NetTransportTCP: NetTransportTCP, + NetTransportUDP: NetTransportUDP, + NetTransportUnix: NetTransportUnix, +} + // NetAttributesFromHTTPRequest generates attributes of the net // namespace as specified by the OpenTelemetry specification for a // span. The network parameter is a string that net.Dial function // from standard library can understand. func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - - switch network { - case "tcp", "tcp4", "tcp6": - attrs = append(attrs, NetTransportTCP) - case "udp", "udp4", "udp6": - attrs = append(attrs, NetTransportUDP) - case "ip", "ip4", "ip6": - attrs = append(attrs, NetTransportIP) - case "unix", "unixgram", "unixpacket": - attrs = append(attrs, NetTransportUnix) - default: - attrs = append(attrs, NetTransportOther) - } - - peerIP, peerName, peerPort := hostIPNamePort(request.RemoteAddr) - if peerIP != "" { - attrs = append(attrs, NetPeerIPKey.String(peerIP)) - } - if peerName != "" { - attrs = append(attrs, NetPeerNameKey.String(peerName)) - } - if peerPort != 0 { - attrs = append(attrs, NetPeerPortKey.Int(peerPort)) - } - - hostIP, hostName, hostPort := "", "", 0 - for _, someHost := range []string{request.Host, request.Header.Get("Host"), request.URL.Host} { - hostIP, hostName, hostPort = hostIPNamePort(someHost) - if hostIP != "" || hostName != "" || hostPort != 0 { - break - } - } - if hostIP != "" { - attrs = append(attrs, NetHostIPKey.String(hostIP)) - } - if hostName != "" { - attrs = append(attrs, NetHostNameKey.String(hostName)) - } - if hostPort != 0 { - attrs = append(attrs, NetHostPortKey.Int(hostPort)) - } - - return attrs -} - -// hostIPNamePort extracts the IP address, name and (optional) port from hostWithPort. 
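// Illustrative sketch (hypothetical usage): the exported v1.7.0 helpers keep
// their signatures after the refactor, so callers still build span attributes
// the same way; assumes go.opentelemetry.io/otel/semconv/v1.7.0 imported as
// semconv and go.opentelemetry.io/otel/attribute for the return type.
func clientSpanAttributes(req *http.Request) []attribute.KeyValue {
	attrs := semconv.HTTPClientAttributesFromHTTPRequest(req)
	attrs = append(attrs, semconv.NetAttributesFromHTTPRequest("tcp", req)...)
	return attrs
}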
-// It handles both IPv4 and IPv6 addresses. If the host portion is not recognized -// as a valid IPv4 or IPv6 address, the `ip` result will be empty and the -// host portion will instead be returned in `name`. -func hostIPNamePort(hostWithPort string) (ip string, name string, port int) { - var ( - hostPart, portPart string - parsedPort uint64 - err error - ) - if hostPart, portPart, err = net.SplitHostPort(hostWithPort); err != nil { - hostPart, portPart = hostWithPort, "" - } - if parsedIP := net.ParseIP(hostPart); parsedIP != nil { - ip = parsedIP.String() - } else { - name = hostPart - } - if parsedPort, err = strconv.ParseUint(portPart, 10, 16); err == nil { - port = int(parsedPort) - } - return + return sc.NetAttributesFromHTTPRequest(network, request) } // EndUserAttributesFromHTTPRequest generates attributes of the // enduser namespace as specified by the OpenTelemetry specification // for a span. func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - if username, _, ok := request.BasicAuth(); ok { - return []attribute.KeyValue{EnduserIDKey.String(username)} - } - return nil + return sc.EndUserAttributesFromHTTPRequest(request) } // HTTPClientAttributesFromHTTPRequest generates attributes of the // http namespace as specified by the OpenTelemetry specification for // a span on the client side. func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - - if request.Method != "" { - attrs = append(attrs, HTTPMethodKey.String(request.Method)) - } else { - attrs = append(attrs, HTTPMethodKey.String(http.MethodGet)) - } - - // remove any username/password info that may be in the URL - // before adding it to the attributes - userinfo := request.URL.User - request.URL.User = nil - - attrs = append(attrs, HTTPURLKey.String(request.URL.String())) - - // restore any username/password info that was removed - request.URL.User = userinfo - - return append(attrs, httpCommonAttributesFromHTTPRequest(request)...) -} - -func httpCommonAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - if ua := request.UserAgent(); ua != "" { - attrs = append(attrs, HTTPUserAgentKey.String(ua)) - } - if request.ContentLength > 0 { - attrs = append(attrs, HTTPRequestContentLengthKey.Int64(request.ContentLength)) - } - - return append(attrs, httpBasicAttributesFromHTTPRequest(request)...) -} - -func httpBasicAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { - // as these attributes are used by HTTPServerMetricAttributesFromHTTPRequest, they should be low-cardinality - attrs := []attribute.KeyValue{} - - if request.TLS != nil { - attrs = append(attrs, HTTPSchemeHTTPS) - } else { - attrs = append(attrs, HTTPSchemeHTTP) - } - - if request.Host != "" { - attrs = append(attrs, HTTPHostKey.String(request.Host)) - } - - flavor := "" - if request.ProtoMajor == 1 { - flavor = fmt.Sprintf("1.%d", request.ProtoMinor) - } else if request.ProtoMajor == 2 { - flavor = "2" - } - if flavor != "" { - attrs = append(attrs, HTTPFlavorKey.String(flavor)) - } - - return attrs + return sc.HTTPClientAttributesFromHTTPRequest(request) } // HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes // to be used with server-side HTTP metrics. 
func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{} - if serverName != "" { - attrs = append(attrs, HTTPServerNameKey.String(serverName)) - } - return append(attrs, httpBasicAttributesFromHTTPRequest(request)...) + return sc.HTTPServerMetricAttributesFromHTTPRequest(serverName, request) } // HTTPServerAttributesFromHTTPRequest generates attributes of the @@ -197,116 +90,25 @@ func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http. // a span on the server side. Currently, only basic authentication is // supported. func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue { - attrs := []attribute.KeyValue{ - HTTPMethodKey.String(request.Method), - HTTPTargetKey.String(request.RequestURI), - } - - if serverName != "" { - attrs = append(attrs, HTTPServerNameKey.String(serverName)) - } - if route != "" { - attrs = append(attrs, HTTPRouteKey.String(route)) - } - if values, ok := request.Header["X-Forwarded-For"]; ok && len(values) > 0 { - if addresses := strings.SplitN(values[0], ",", 2); len(addresses) > 0 { - attrs = append(attrs, HTTPClientIPKey.String(addresses[0])) - } - } - - return append(attrs, httpCommonAttributesFromHTTPRequest(request)...) + return sc.HTTPServerAttributesFromHTTPRequest(serverName, route, request) } // HTTPAttributesFromHTTPStatusCode generates attributes of the http // namespace as specified by the OpenTelemetry specification for a // span. func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue { - attrs := []attribute.KeyValue{ - HTTPStatusCodeKey.Int(code), - } - return attrs -} - -type codeRange struct { - fromInclusive int - toInclusive int -} - -func (r codeRange) contains(code int) bool { - return r.fromInclusive <= code && code <= r.toInclusive -} - -var validRangesPerCategory = map[int][]codeRange{ - 1: { - {http.StatusContinue, http.StatusEarlyHints}, - }, - 2: { - {http.StatusOK, http.StatusAlreadyReported}, - {http.StatusIMUsed, http.StatusIMUsed}, - }, - 3: { - {http.StatusMultipleChoices, http.StatusUseProxy}, - {http.StatusTemporaryRedirect, http.StatusPermanentRedirect}, - }, - 4: { - {http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful… - {http.StatusMisdirectedRequest, http.StatusUpgradeRequired}, - {http.StatusPreconditionRequired, http.StatusTooManyRequests}, - {http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge}, - {http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons}, - }, - 5: { - {http.StatusInternalServerError, http.StatusLoopDetected}, - {http.StatusNotExtended, http.StatusNetworkAuthenticationRequired}, - }, + return sc.HTTPAttributesFromHTTPStatusCode(code) } // SpanStatusFromHTTPStatusCode generates a status code and a message // as specified by the OpenTelemetry specification for a span. func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) { - spanCode, valid := validateHTTPStatusCode(code) - if !valid { - return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) - } - return spanCode, "" + return internal.SpanStatusFromHTTPStatusCode(code) } // SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message // as specified by the OpenTelemetry specification for a span. // Exclude 4xx for SERVER to set the appropriate status. 
func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) { - spanCode, valid := validateHTTPStatusCode(code) - if !valid { - return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code) - } - category := code / 100 - if spanKind == trace.SpanKindServer && category == 4 { - return codes.Unset, "" - } - return spanCode, "" -} - -// Validates the HTTP status code and returns corresponding span status code. -// If the `code` is not a valid HTTP status code, returns span status Error -// and false. -func validateHTTPStatusCode(code int) (codes.Code, bool) { - category := code / 100 - ranges, ok := validRangesPerCategory[category] - if !ok { - return codes.Error, false - } - ok = false - for _, crange := range ranges { - ok = crange.contains(code) - if ok { - break - } - } - if !ok { - return codes.Error, false - } - if category > 0 && category < 4 { - return codes.Unset, true - } - return codes.Error, true + return internal.SpanStatusFromHTTPStatusCodeAndSpanKind(code, spanKind) } diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/schema.go index ec8b463d98e2f..64b6c46a58db1 100644 --- a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/schema.go +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/schema.go @@ -17,4 +17,4 @@ package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0" // SchemaURL is the schema URL that matches the version of the semantic conventions // that this package defines. Semconv packages starting from v1.4.0 must declare // non-empty schema URL in the form https://opentelemetry.io/schemas/ -const SchemaURL = "https://opentelemetry.io/schemas/v1.7.0" +const SchemaURL = "https://opentelemetry.io/schemas/1.7.0" diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go index 28b4f5e4d8214..caf7249de859b 100644 --- a/vendor/go.opentelemetry.io/otel/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace.go @@ -31,9 +31,12 @@ func Tracer(name string, opts ...trace.TracerOption) trace.Tracer { // If none is registered then an instance of NoopTracerProvider is returned. // // Use the trace provider to create a named tracer. E.g. -// tracer := otel.GetTracerProvider().Tracer("example.com/foo") +// +// tracer := otel.GetTracerProvider().Tracer("example.com/foo") +// // or -// tracer := otel.Tracer("example.com/foo") +// +// tracer := otel.Tracer("example.com/foo") func GetTracerProvider() trace.TracerProvider { return global.TracerProvider() } diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go index bcc333e04e2c7..f058cc781e00b 100644 --- a/vendor/go.opentelemetry.io/otel/trace/config.go +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -124,7 +124,7 @@ func NewSpanEndConfig(options ...SpanEndOption) SpanConfig { } // SpanStartOption applies an option to a SpanConfig. These options are applicable -// only when the span is created +// only when the span is created. type SpanStartOption interface { applySpanStart(SpanConfig) SpanConfig } diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go index 391417718f5ae..ab0346f9664ae 100644 --- a/vendor/go.opentelemetry.io/otel/trace/doc.go +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -17,7 +17,7 @@ Package trace provides an implementation of the tracing part of the OpenTelemetry API. 
To participate in distributed traces a Span needs to be created for the -operation being performed as part of a traced workflow. It its simplest form: +operation being performed as part of a traced workflow. In its simplest form: var tracer trace.Tracer diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go index ad9a9fc5be641..73950f207789c 100644 --- a/vendor/go.opentelemetry.io/otel/trace/noop.go +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -85,5 +85,5 @@ func (noopSpan) AddEvent(string, ...EventOption) {} // SetName does nothing. func (noopSpan) SetName(string) {} -// TracerProvider returns a no-op TracerProvider +// TracerProvider returns a no-op TracerProvider. func (noopSpan) TracerProvider() TracerProvider { return noopTracerProvider{} } diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go index 0923ceb98d574..4aa94f79f46ab 100644 --- a/vendor/go.opentelemetry.io/otel/trace/trace.go +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -63,7 +63,7 @@ func (t TraceID) MarshalJSON() ([]byte, error) { return json.Marshal(t.String()) } -// String returns the hex string representation form of a TraceID +// String returns the hex string representation form of a TraceID. func (t TraceID) String() string { return hex.EncodeToString(t[:]) } @@ -86,7 +86,7 @@ func (s SpanID) MarshalJSON() ([]byte, error) { return json.Marshal(s.String()) } -// String returns the hex string representation form of a SpanID +// String returns the hex string representation form of a SpanID. func (s SpanID) String() string { return hex.EncodeToString(s[:]) } @@ -151,7 +151,7 @@ func decodeHex(h string, b []byte) error { return nil } -// TraceFlags contains flags that can be set on a SpanContext +// TraceFlags contains flags that can be set on a SpanContext. type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`. // IsSampled returns if the sampling bit is set in the TraceFlags. @@ -160,7 +160,7 @@ func (tf TraceFlags) IsSampled() bool { } // WithSampled sets the sampling bit in a new copy of the TraceFlags. -func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { +func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // sampled is not a control flag. if sampled { return tf | FlagsSampled } @@ -174,7 +174,7 @@ func (tf TraceFlags) MarshalJSON() ([]byte, error) { return json.Marshal(tf.String()) } -// String returns the hex string representation form of TraceFlags +// String returns the hex string representation form of TraceFlags. func (tf TraceFlags) String() string { return hex.EncodeToString([]byte{byte(tf)}[:]) } @@ -364,8 +364,9 @@ type Span interface { SpanContext() SpanContext // SetStatus sets the status of the Span in the form of a code and a - // description, overriding previous values set. The description is only - // included in a status when the code is for an error. + // description, provided the status hasn't already been set to a higher + // value before (OK > Error > Unset). The description is only included in a + // status when the code is for an error. SetStatus(code codes.Code, description string) // SetName sets the Span name. @@ -386,16 +387,16 @@ type Span interface { // // For example, a Link is used in the following situations: // -// 1. Batch Processing: A batch of operations may contain operations -// associated with one or more traces/spans. 
Since there can only be one -// parent SpanContext, a Link is used to keep reference to the -// SpanContext of all operations in the batch. -// 2. Public Endpoint: A SpanContext for an in incoming client request on a -// public endpoint should be considered untrusted. In such a case, a new -// trace with its own identity and sampling decision needs to be created, -// but this new trace needs to be related to the original trace in some -// form. A Link is used to keep reference to the original SpanContext and -// track the relationship. +// 1. Batch Processing: A batch of operations may contain operations +// associated with one or more traces/spans. Since there can only be one +// parent SpanContext, a Link is used to keep reference to the +// SpanContext of all operations in the batch. +// 2. Public Endpoint: A SpanContext for an in incoming client request on a +// public endpoint should be considered untrusted. In such a case, a new +// trace with its own identity and sampling decision needs to be created, +// but this new trace needs to be related to the original trace in some +// form. A Link is used to keep reference to the original SpanContext and +// track the relationship. type Link struct { // SpanContext of the linked Span. SpanContext SpanContext @@ -503,17 +504,48 @@ type Tracer interface { Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span) } -// TracerProvider provides access to instrumentation Tracers. +// TracerProvider provides Tracers that are used by instrumentation code to +// trace computational workflows. +// +// A TracerProvider is the collection destination of all Spans from Tracers it +// provides, it represents a unique telemetry collection pipeline. How that +// pipeline is defined, meaning how those Spans are collected, processed, and +// where they are exported, depends on its implementation. Instrumentation +// authors do not need to define this implementation, rather just use the +// provided Tracers to instrument code. +// +// Commonly, instrumentation code will accept a TracerProvider implementation +// at runtime from its users or it can simply use the globally registered one +// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider). // // Warning: methods may be added to this interface in minor releases. type TracerProvider interface { - // Tracer creates an implementation of the Tracer interface. - // The instrumentationName must be the name of the library providing - // instrumentation. This name may be the same as the instrumented code - // only if that code provides built-in instrumentation. If the - // instrumentationName is empty, then a implementation defined default - // name will be used instead. + // Tracer returns a unique Tracer scoped to be used by instrumentation code + // to trace computational workflows. The scope and identity of that + // instrumentation code is uniquely defined by the name and options passed. + // + // The passed name needs to uniquely identify instrumentation code. + // Therefore, it is recommended that name is the Go package name of the + // library providing instrumentation (note: not the code being + // instrumented). Instrumentation libraries can have multiple versions, + // therefore, the WithInstrumentationVersion option should be used to + // distinguish these different codebases. Additionally, instrumentation + // libraries may sometimes use traces to communicate different domains of + // workflow data (i.e. using spans to communicate workflow events only). 
If + // this is the case, the WithScopeAttributes option should be used to + // uniquely identify Tracers that handle the different domains of workflow + // data. + // + // If the same name and options are passed multiple times, the same Tracer + // will be returned (it is up to the implementation if this will be the + // same underlying instance of that Tracer or not). It is not necessary to + // call this multiple times with the same name and options to get an + // up-to-date Tracer. All implementations will ensure any TracerProvider + // configuration changes are propagated to all provided Tracers. + // + // If name is empty, then an implementation defined default name will be + // used instead. // - // This method must be concurrency safe. - Tracer(instrumentationName string, opts ...TracerOption) Tracer + // This method is safe to call concurrently. + Tracer(name string, options ...TracerOption) Tracer } diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go index 86fc62c1d8f29..ca68a82e5f73a 100644 --- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go +++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go @@ -21,7 +21,7 @@ import ( "strings" ) -var ( +const ( maxListMembers = 32 listDelimiter = "," @@ -32,10 +32,6 @@ var ( withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` - keyRe = regexp.MustCompile(`^((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))$`) - valueRe = regexp.MustCompile(`^(` + valueFormat + `)$`) - memberRe = regexp.MustCompile(`^\s*((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) - errInvalidKey errorConst = "invalid tracestate key" errInvalidValue errorConst = "invalid tracestate value" errInvalidMember errorConst = "invalid tracestate list-member" @@ -43,6 +39,12 @@ var ( errDuplicate errorConst = "duplicate list-member in tracestate" ) +var ( + keyRe = regexp.MustCompile(`^((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))$`) + valueRe = regexp.MustCompile(`^(` + valueFormat + `)$`) + memberRe = regexp.MustCompile(`^\s*((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`) +) + type member struct { Key string Value string @@ -68,7 +70,6 @@ func parseMember(m string) (member, error) { Key: matches[1], Value: matches[4], }, nil - } // String encodes member into a string compliant with the W3C Trace Context @@ -171,7 +172,8 @@ func (ts TraceState) Get(key string) string { // specification an error is returned with the original TraceState. // // If adding a new list-member means the TraceState would have more members -// than is allowed an error is returned instead with the original TraceState. +// then is allowed, the new list-member will be inserted and the right-most +// list-member will be dropped in the returned TraceState. func (ts TraceState) Insert(key, value string) (TraceState, error) { m, err := newMember(key, value) if err != nil { @@ -179,17 +181,10 @@ func (ts TraceState) Insert(key, value string) (TraceState, error) { } cTS := ts.Delete(key) - if cTS.Len()+1 > maxListMembers { - // TODO (MrAlias): When the second version of the Trace Context - // specification is published this needs to not return an error. - // Instead it should drop the "right-most" member and insert the new - // member at the front. 
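// Illustrative sketch (hypothetical usage) of the new Insert semantics: when the
// TraceState already holds the maximum 32 list-members, the right-most member is
// dropped and the new one is placed at the front instead of returning an error;
// assumes go.opentelemetry.io/otel/trace imported as trace.
func insertPreferred(ts trace.TraceState) trace.TraceState {
	updated, err := ts.Insert("vendorkey", "value")
	if err != nil {
		return ts // the key or value failed W3C tracestate validation
	}
	return updated // "vendorkey" is now the left-most list-member
}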
- // - // https://github.com/w3c/trace-context/pull/448 - return ts, fmt.Errorf("failed to insert: %w", errMemberNumber) + if cTS.Len()+1 <= maxListMembers { + cTS.list = append(cTS.list, member{}) } - - cTS.list = append(cTS.list, member{}) + // When the number of members exceeds capacity, drop the "right-most". copy(cTS.list[1:], cTS.list) cTS.list[0] = m diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go index a09bcbb5e8f24..bda8f7cbfae2c 100644 --- a/vendor/go.opentelemetry.io/otel/version.go +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. func Version() string { - return "1.4.1" + return "1.12.0" } diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml index 3f06f29934673..66f4562221520 100644 --- a/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -14,7 +14,7 @@ module-sets: stable-v1: - version: v1.4.1 + version: v1.12.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opentracing @@ -34,27 +34,23 @@ module-sets: - go.opentelemetry.io/otel/trace - go.opentelemetry.io/otel/sdk experimental-metrics: - version: v0.27.0 + version: v0.35.0 modules: + - go.opentelemetry.io/otel/example/opencensus - go.opentelemetry.io/otel/example/prometheus - go.opentelemetry.io/otel/exporters/otlp/otlpmetric - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp - go.opentelemetry.io/otel/exporters/prometheus - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric - - go.opentelemetry.io/otel/internal/metric - go.opentelemetry.io/otel/metric - - go.opentelemetry.io/otel/sdk/export/metric - go.opentelemetry.io/otel/sdk/metric + - go.opentelemetry.io/otel/bridge/opencensus + - go.opentelemetry.io/otel/bridge/opencensus/test + - go.opentelemetry.io/otel/example/view experimental-schema: - version: v0.0.2 + version: v0.0.3 modules: - go.opentelemetry.io/otel/schema - bridge: - version: v0.27.1 - modules: - - go.opentelemetry.io/otel/bridge/opencensus - - go.opentelemetry.io/otel/bridge/opencensus/test - - go.opentelemetry.io/otel/example/opencensus excluded-modules: - go.opentelemetry.io/otel/internal/tools diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_config.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_config.pb.go deleted file mode 100644 index 07f7e9b1fa355..0000000000000 --- a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_config.pb.go +++ /dev/null @@ -1,573 +0,0 @@ -// Copyright 2019, OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.23.0 -// protoc v3.13.0 -// source: opentelemetry/proto/trace/v1/trace_config.proto - -package v1 - -import ( - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// How spans should be sampled: -// - Always off -// - Always on -// - Always follow the parent Span's decision (off if no parent). -type ConstantSampler_ConstantDecision int32 - -const ( - ConstantSampler_ALWAYS_OFF ConstantSampler_ConstantDecision = 0 - ConstantSampler_ALWAYS_ON ConstantSampler_ConstantDecision = 1 - ConstantSampler_ALWAYS_PARENT ConstantSampler_ConstantDecision = 2 -) - -// Enum value maps for ConstantSampler_ConstantDecision. -var ( - ConstantSampler_ConstantDecision_name = map[int32]string{ - 0: "ALWAYS_OFF", - 1: "ALWAYS_ON", - 2: "ALWAYS_PARENT", - } - ConstantSampler_ConstantDecision_value = map[string]int32{ - "ALWAYS_OFF": 0, - "ALWAYS_ON": 1, - "ALWAYS_PARENT": 2, - } -) - -func (x ConstantSampler_ConstantDecision) Enum() *ConstantSampler_ConstantDecision { - p := new(ConstantSampler_ConstantDecision) - *p = x - return p -} - -func (x ConstantSampler_ConstantDecision) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (ConstantSampler_ConstantDecision) Descriptor() protoreflect.EnumDescriptor { - return file_opentelemetry_proto_trace_v1_trace_config_proto_enumTypes[0].Descriptor() -} - -func (ConstantSampler_ConstantDecision) Type() protoreflect.EnumType { - return &file_opentelemetry_proto_trace_v1_trace_config_proto_enumTypes[0] -} - -func (x ConstantSampler_ConstantDecision) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use ConstantSampler_ConstantDecision.Descriptor instead. -func (ConstantSampler_ConstantDecision) EnumDescriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{1, 0} -} - -// Global configuration of the trace service. All fields must be specified, or -// the default (zero) values will be used for each type. -type TraceConfig struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The global default sampler used to make decisions on span sampling. - // - // Types that are assignable to Sampler: - // *TraceConfig_ConstantSampler - // *TraceConfig_TraceIdRatioBased - // *TraceConfig_RateLimitingSampler - Sampler isTraceConfig_Sampler `protobuf_oneof:"sampler"` - // The global default max number of attributes per span. - MaxNumberOfAttributes int64 `protobuf:"varint,4,opt,name=max_number_of_attributes,json=maxNumberOfAttributes,proto3" json:"max_number_of_attributes,omitempty"` - // The global default max number of annotation events per span. 
- MaxNumberOfTimedEvents int64 `protobuf:"varint,5,opt,name=max_number_of_timed_events,json=maxNumberOfTimedEvents,proto3" json:"max_number_of_timed_events,omitempty"` - // The global default max number of attributes per timed event. - MaxNumberOfAttributesPerTimedEvent int64 `protobuf:"varint,6,opt,name=max_number_of_attributes_per_timed_event,json=maxNumberOfAttributesPerTimedEvent,proto3" json:"max_number_of_attributes_per_timed_event,omitempty"` - // The global default max number of link entries per span. - MaxNumberOfLinks int64 `protobuf:"varint,7,opt,name=max_number_of_links,json=maxNumberOfLinks,proto3" json:"max_number_of_links,omitempty"` - // The global default max number of attributes per span. - MaxNumberOfAttributesPerLink int64 `protobuf:"varint,8,opt,name=max_number_of_attributes_per_link,json=maxNumberOfAttributesPerLink,proto3" json:"max_number_of_attributes_per_link,omitempty"` -} - -func (x *TraceConfig) Reset() { - *x = TraceConfig{} - if protoimpl.UnsafeEnabled { - mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TraceConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TraceConfig) ProtoMessage() {} - -func (x *TraceConfig) ProtoReflect() protoreflect.Message { - mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TraceConfig.ProtoReflect.Descriptor instead. -func (*TraceConfig) Descriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{0} -} - -func (m *TraceConfig) GetSampler() isTraceConfig_Sampler { - if m != nil { - return m.Sampler - } - return nil -} - -func (x *TraceConfig) GetConstantSampler() *ConstantSampler { - if x, ok := x.GetSampler().(*TraceConfig_ConstantSampler); ok { - return x.ConstantSampler - } - return nil -} - -func (x *TraceConfig) GetTraceIdRatioBased() *TraceIdRatioBased { - if x, ok := x.GetSampler().(*TraceConfig_TraceIdRatioBased); ok { - return x.TraceIdRatioBased - } - return nil -} - -func (x *TraceConfig) GetRateLimitingSampler() *RateLimitingSampler { - if x, ok := x.GetSampler().(*TraceConfig_RateLimitingSampler); ok { - return x.RateLimitingSampler - } - return nil -} - -func (x *TraceConfig) GetMaxNumberOfAttributes() int64 { - if x != nil { - return x.MaxNumberOfAttributes - } - return 0 -} - -func (x *TraceConfig) GetMaxNumberOfTimedEvents() int64 { - if x != nil { - return x.MaxNumberOfTimedEvents - } - return 0 -} - -func (x *TraceConfig) GetMaxNumberOfAttributesPerTimedEvent() int64 { - if x != nil { - return x.MaxNumberOfAttributesPerTimedEvent - } - return 0 -} - -func (x *TraceConfig) GetMaxNumberOfLinks() int64 { - if x != nil { - return x.MaxNumberOfLinks - } - return 0 -} - -func (x *TraceConfig) GetMaxNumberOfAttributesPerLink() int64 { - if x != nil { - return x.MaxNumberOfAttributesPerLink - } - return 0 -} - -type isTraceConfig_Sampler interface { - isTraceConfig_Sampler() -} - -type TraceConfig_ConstantSampler struct { - ConstantSampler *ConstantSampler `protobuf:"bytes,1,opt,name=constant_sampler,json=constantSampler,proto3,oneof"` -} - -type TraceConfig_TraceIdRatioBased struct { - TraceIdRatioBased *TraceIdRatioBased 
`protobuf:"bytes,2,opt,name=trace_id_ratio_based,json=traceIdRatioBased,proto3,oneof"` -} - -type TraceConfig_RateLimitingSampler struct { - RateLimitingSampler *RateLimitingSampler `protobuf:"bytes,3,opt,name=rate_limiting_sampler,json=rateLimitingSampler,proto3,oneof"` -} - -func (*TraceConfig_ConstantSampler) isTraceConfig_Sampler() {} - -func (*TraceConfig_TraceIdRatioBased) isTraceConfig_Sampler() {} - -func (*TraceConfig_RateLimitingSampler) isTraceConfig_Sampler() {} - -// Sampler that always makes a constant decision on span sampling. -type ConstantSampler struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Decision ConstantSampler_ConstantDecision `protobuf:"varint,1,opt,name=decision,proto3,enum=opentelemetry.proto.trace.v1.ConstantSampler_ConstantDecision" json:"decision,omitempty"` -} - -func (x *ConstantSampler) Reset() { - *x = ConstantSampler{} - if protoimpl.UnsafeEnabled { - mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ConstantSampler) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ConstantSampler) ProtoMessage() {} - -func (x *ConstantSampler) ProtoReflect() protoreflect.Message { - mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ConstantSampler.ProtoReflect.Descriptor instead. -func (*ConstantSampler) Descriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{1} -} - -func (x *ConstantSampler) GetDecision() ConstantSampler_ConstantDecision { - if x != nil { - return x.Decision - } - return ConstantSampler_ALWAYS_OFF -} - -// Sampler that tries to uniformly sample traces with a given ratio. -// The ratio of sampling a trace is equal to that of the specified ratio. -type TraceIdRatioBased struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The desired ratio of sampling. Must be within [0.0, 1.0]. - SamplingRatio float64 `protobuf:"fixed64,1,opt,name=samplingRatio,proto3" json:"samplingRatio,omitempty"` -} - -func (x *TraceIdRatioBased) Reset() { - *x = TraceIdRatioBased{} - if protoimpl.UnsafeEnabled { - mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TraceIdRatioBased) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TraceIdRatioBased) ProtoMessage() {} - -func (x *TraceIdRatioBased) ProtoReflect() protoreflect.Message { - mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TraceIdRatioBased.ProtoReflect.Descriptor instead. 
-func (*TraceIdRatioBased) Descriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{2} -} - -func (x *TraceIdRatioBased) GetSamplingRatio() float64 { - if x != nil { - return x.SamplingRatio - } - return 0 -} - -// Sampler that tries to sample with a rate per time window. -type RateLimitingSampler struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Rate per second. - Qps int64 `protobuf:"varint,1,opt,name=qps,proto3" json:"qps,omitempty"` -} - -func (x *RateLimitingSampler) Reset() { - *x = RateLimitingSampler{} - if protoimpl.UnsafeEnabled { - mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RateLimitingSampler) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RateLimitingSampler) ProtoMessage() {} - -func (x *RateLimitingSampler) ProtoReflect() protoreflect.Message { - mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RateLimitingSampler.ProtoReflect.Descriptor instead. -func (*RateLimitingSampler) Descriptor() ([]byte, []int) { - return file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{3} -} - -func (x *RateLimitingSampler) GetQps() int64 { - if x != nil { - return x.Qps - } - return 0 -} - -var File_opentelemetry_proto_trace_v1_trace_config_proto protoreflect.FileDescriptor - -var file_opentelemetry_proto_trace_v1_trace_config_proto_rawDesc = []byte{ - 0x0a, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74, - 0x72, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x1c, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x22, - 0x84, 0x05, 0x0a, 0x0b, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, - 0x5a, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x73, 0x61, 0x6d, 0x70, - 0x6c, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x12, 0x62, 0x0a, 0x14, 0x74, - 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x5f, 0x62, 0x61, - 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, - 0x52, 0x61, 0x74, 0x69, 0x6f, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x00, 0x52, 0x11, 0x74, 0x72, - 0x61, 0x63, 0x65, 0x49, 0x64, 0x52, 0x61, 0x74, 0x69, 0x6f, 
0x42, 0x61, 0x73, 0x65, 0x64, 0x12, - 0x67, 0x0a, 0x15, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x6e, 0x67, - 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, - 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x61, - 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, - 0x72, 0x48, 0x00, 0x52, 0x13, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x6e, - 0x67, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x18, 0x6d, 0x61, 0x78, 0x5f, - 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x4e, - 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x73, 0x12, 0x3a, 0x0a, 0x1a, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, - 0x6f, 0x66, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x16, 0x6d, 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x4f, 0x66, 0x54, 0x69, 0x6d, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x54, 0x0a, - 0x28, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x61, - 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x22, 0x6d, 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x41, 0x74, 0x74, 0x72, - 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x50, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x64, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x10, 0x6d, 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x4c, 0x69, 0x6e, - 0x6b, 0x73, 0x12, 0x47, 0x0a, 0x21, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x5f, 0x6f, 0x66, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x70, - 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x1c, 0x6d, - 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x50, 0x65, 0x72, 0x4c, 0x69, 0x6e, 0x6b, 0x42, 0x09, 0x0a, 0x07, 0x73, - 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x22, 0xb3, 0x01, 0x0a, 0x0f, 0x43, 0x6f, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x12, 0x5a, 0x0a, 0x08, 0x64, 0x65, - 0x63, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x74, 0x44, 0x65, 0x63, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x65, - 0x63, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x44, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x74, 0x44, 0x65, 0x63, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x0a, 
0x41, 0x4c, - 0x57, 0x41, 0x59, 0x53, 0x5f, 0x4f, 0x46, 0x46, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x41, 0x4c, - 0x57, 0x41, 0x59, 0x53, 0x5f, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x4c, 0x57, - 0x41, 0x59, 0x53, 0x5f, 0x50, 0x41, 0x52, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x22, 0x39, 0x0a, 0x11, - 0x54, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x42, 0x61, 0x73, 0x65, - 0x64, 0x12, 0x24, 0x0a, 0x0d, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x61, 0x74, - 0x69, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, - 0x6e, 0x67, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x22, 0x27, 0x0a, 0x13, 0x52, 0x61, 0x74, 0x65, 0x4c, - 0x69, 0x6d, 0x69, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x12, 0x10, - 0x0a, 0x03, 0x71, 0x70, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x71, 0x70, 0x73, - 0x42, 0x68, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, - 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x42, 0x10, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} - -var ( - file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescOnce sync.Once - file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescData = file_opentelemetry_proto_trace_v1_trace_config_proto_rawDesc -) - -func file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescGZIP() []byte { - file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescOnce.Do(func() { - file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescData) - }) - return file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescData -} - -var file_opentelemetry_proto_trace_v1_trace_config_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_opentelemetry_proto_trace_v1_trace_config_proto_goTypes = []interface{}{ - (ConstantSampler_ConstantDecision)(0), // 0: opentelemetry.proto.trace.v1.ConstantSampler.ConstantDecision - (*TraceConfig)(nil), // 1: opentelemetry.proto.trace.v1.TraceConfig - (*ConstantSampler)(nil), // 2: opentelemetry.proto.trace.v1.ConstantSampler - (*TraceIdRatioBased)(nil), // 3: opentelemetry.proto.trace.v1.TraceIdRatioBased - (*RateLimitingSampler)(nil), // 4: opentelemetry.proto.trace.v1.RateLimitingSampler -} -var file_opentelemetry_proto_trace_v1_trace_config_proto_depIdxs = []int32{ - 2, // 0: opentelemetry.proto.trace.v1.TraceConfig.constant_sampler:type_name -> opentelemetry.proto.trace.v1.ConstantSampler - 3, // 1: opentelemetry.proto.trace.v1.TraceConfig.trace_id_ratio_based:type_name -> opentelemetry.proto.trace.v1.TraceIdRatioBased - 4, // 2: opentelemetry.proto.trace.v1.TraceConfig.rate_limiting_sampler:type_name -> opentelemetry.proto.trace.v1.RateLimitingSampler - 0, // 3: opentelemetry.proto.trace.v1.ConstantSampler.decision:type_name -> opentelemetry.proto.trace.v1.ConstantSampler.ConstantDecision - 4, 
// [4:4] is the sub-list for method output_type - 4, // [4:4] is the sub-list for method input_type - 4, // [4:4] is the sub-list for extension type_name - 4, // [4:4] is the sub-list for extension extendee - 0, // [0:4] is the sub-list for field type_name -} - -func init() { file_opentelemetry_proto_trace_v1_trace_config_proto_init() } -func file_opentelemetry_proto_trace_v1_trace_config_proto_init() { - if File_opentelemetry_proto_trace_v1_trace_config_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TraceConfig); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ConstantSampler); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TraceIdRatioBased); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RateLimitingSampler); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*TraceConfig_ConstantSampler)(nil), - (*TraceConfig_TraceIdRatioBased)(nil), - (*TraceConfig_RateLimitingSampler)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_opentelemetry_proto_trace_v1_trace_config_proto_rawDesc, - NumEnums: 1, - NumMessages: 4, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_opentelemetry_proto_trace_v1_trace_config_proto_goTypes, - DependencyIndexes: file_opentelemetry_proto_trace_v1_trace_config_proto_depIdxs, - EnumInfos: file_opentelemetry_proto_trace_v1_trace_config_proto_enumTypes, - MessageInfos: file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes, - }.Build() - File_opentelemetry_proto_trace_v1_trace_config_proto = out.File - file_opentelemetry_proto_trace_v1_trace_config_proto_rawDesc = nil - file_opentelemetry_proto_trace_v1_trace_config_proto_goTypes = nil - file_opentelemetry_proto_trace_v1_trace_config_proto_depIdxs = nil -} diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go index 402614f1d3c5a..fc285c089e7b4 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.23.0 -// protoc v3.13.0 +// protoc-gen-go v1.26.0 +// protoc v3.17.3 // source: opentelemetry/proto/collector/trace/v1/trace_service.proto package v1 import ( - proto "github.com/golang/protobuf/proto" v1 "go.opentelemetry.io/proto/otlp/trace/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -36,10 +35,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type ExportTraceServiceRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -96,6 +91,23 @@ type ExportTraceServiceResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + + // The details of a partially successful export request. + // + // If the request is only partially accepted + // (i.e. when the server accepts only parts of the data and rejects the rest) + // the server MUST initialize the `partial_success` field and MUST + // set the `rejected_` with the number of items it rejected. + // + // Servers MAY also make use of the `partial_success` field to convey + // warnings/suggestions to senders even when the request was fully accepted. + // In such cases, the `rejected_` MUST have a value of `0` and + // the `error_message` MUST be non-empty. + // + // A `partial_success` message with an empty value (rejected_ = 0 and + // `error_message` = "") is equivalent to it not being set/present. Senders + // SHOULD interpret it the same way as in the full success case. + PartialSuccess *ExportTracePartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success,omitempty"` } func (x *ExportTraceServiceResponse) Reset() { @@ -130,6 +142,79 @@ func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) { return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP(), []int{1} } +func (x *ExportTraceServiceResponse) GetPartialSuccess() *ExportTracePartialSuccess { + if x != nil { + return x.PartialSuccess + } + return nil +} + +type ExportTracePartialSuccess struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The number of rejected spans. + // + // A `rejected_` field holding a `0` value indicates that the + // request was fully accepted. + RejectedSpans int64 `protobuf:"varint,1,opt,name=rejected_spans,json=rejectedSpans,proto3" json:"rejected_spans,omitempty"` + // A developer-facing human-readable message in English. It should be used + // either to explain why the server rejected parts of the data during a partial + // success or to convey warnings/suggestions during a full success. The message + // should offer guidance on how users can address such issues. + // + // error_message is an optional field. An error_message with an empty value + // is equivalent to it not being set. 
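The partial_success semantics described in the comments above can be illustrated with a small client-side sketch (not part of the diff); it assumes a connected TraceServiceClient and uses only the getters added in this file:

```go
// Sketch: inspecting the new partial_success field after an OTLP trace export.
package exportdemo

import (
	"context"
	"log"

	coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
)

func export(ctx context.Context, client coltracepb.TraceServiceClient, req *coltracepb.ExportTraceServiceRequest) error {
	resp, err := client.Export(ctx, req)
	if err != nil {
		return err
	}
	// A nil or zero-valued partial_success is equivalent to full success.
	if ps := resp.GetPartialSuccess(); ps != nil && (ps.GetRejectedSpans() > 0 || ps.GetErrorMessage() != "") {
		log.Printf("collector rejected %d spans: %s", ps.GetRejectedSpans(), ps.GetErrorMessage())
	}
	return nil
}
```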
+ ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` +} + +func (x *ExportTracePartialSuccess) Reset() { + *x = ExportTracePartialSuccess{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExportTracePartialSuccess) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExportTracePartialSuccess) ProtoMessage() {} + +func (x *ExportTracePartialSuccess) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExportTracePartialSuccess.ProtoReflect.Descriptor instead. +func (*ExportTracePartialSuccess) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP(), []int{2} +} + +func (x *ExportTracePartialSuccess) GetRejectedSpans() int64 { + if x != nil { + return x.RejectedSpans + } + return 0 +} + +func (x *ExportTracePartialSuccess) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + var File_opentelemetry_proto_collector_trace_v1_trace_service_proto protoreflect.FileDescriptor var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc = []byte{ @@ -149,26 +234,42 @@ var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc = [] 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x22, - 0x1c, 0x0a, 0x1a, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xa2, 0x01, - 0x0a, 0x0c, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x91, - 0x01, 0x0a, 0x06, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x41, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x42, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, - 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x42, 0x73, 0x0a, 0x29, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, - 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, - 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, - 0x11, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 
0x63, 0x65, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, - 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x74, - 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x88, 0x01, 0x0a, 0x1a, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6a, + 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, + 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, + 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74, + 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x22, 0x67, 0x0a, 0x19, 0x45, 0x78, + 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, + 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x6a, 0x65, 0x63, + 0x74, 0x65, 0x64, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x0d, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x23, + 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x32, 0xa2, 0x01, 0x0a, 0x0c, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x12, 0x91, 0x01, 0x0a, 0x06, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x12, + 0x41, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, + 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x42, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, + 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x9c, 0x01, 0x0a, 0x29, 0x69, 0x6f, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, + 
0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xaa, 0x02, + 0x26, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x54, + 0x72, 0x61, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -183,21 +284,23 @@ func file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData } -var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes = make([]protoimpl.MessageInfo, 3) var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_goTypes = []interface{}{ (*ExportTraceServiceRequest)(nil), // 0: opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest (*ExportTraceServiceResponse)(nil), // 1: opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse - (*v1.ResourceSpans)(nil), // 2: opentelemetry.proto.trace.v1.ResourceSpans + (*ExportTracePartialSuccess)(nil), // 2: opentelemetry.proto.collector.trace.v1.ExportTracePartialSuccess + (*v1.ResourceSpans)(nil), // 3: opentelemetry.proto.trace.v1.ResourceSpans } var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_depIdxs = []int32{ - 2, // 0: opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans - 0, // 1: opentelemetry.proto.collector.trace.v1.TraceService.Export:input_type -> opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest - 1, // 2: opentelemetry.proto.collector.trace.v1.TraceService.Export:output_type -> opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse - 2, // [2:3] is the sub-list for method output_type - 1, // [1:2] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 3, // 0: opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans + 2, // 1: opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse.partial_success:type_name -> opentelemetry.proto.collector.trace.v1.ExportTracePartialSuccess + 0, // 2: opentelemetry.proto.collector.trace.v1.TraceService.Export:input_type -> opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest + 1, // 3: opentelemetry.proto.collector.trace.v1.TraceService.Export:output_type -> opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse + 3, // [3:4] is the sub-list for method output_type + 2, // [2:3] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_opentelemetry_proto_collector_trace_v1_trace_service_proto_init() } @@ -230,6 +333,18 @@ func file_opentelemetry_proto_collector_trace_v1_trace_service_proto_init() { return nil } } + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExportTracePartialSuccess); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return 
nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -237,7 +352,7 @@ func file_opentelemetry_proto_collector_trace_v1_trace_service_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc, NumEnums: 0, - NumMessages: 2, + NumMessages: 3, NumExtensions: 0, NumServices: 1, }, diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go index 18dff3d03e7b8..d142c2a447dc3 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go +++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go @@ -13,15 +13,14 @@ import ( "io" "net/http" - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" ) // Suppress "imported and not used" errors @@ -30,7 +29,6 @@ var _ io.Reader var _ status.Status var _ = runtime.String var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage var _ = metadata.Join func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { @@ -79,7 +77,7 @@ func RegisterTraceServiceHandlerServer(ctx context.Context, mux *runtime.ServeMu var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/trace")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -141,7 +139,7 @@ func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMu ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/trace")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return @@ -161,7 +159,7 @@ func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMu } var ( - pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, "")) ) var ( diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go index 4e4c24c0c875d..c21f2cb47cf7c 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go 
+++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go @@ -1,4 +1,8 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.1.0 +// - protoc v3.17.3 +// source: opentelemetry/proto/collector/trace/v1/trace_service.proto package v1 @@ -11,6 +15,7 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 // TraceServiceClient is the client API for TraceService service. @@ -66,7 +71,7 @@ type UnsafeTraceServiceServer interface { } func RegisterTraceServiceServer(s grpc.ServiceRegistrar, srv TraceServiceServer) { - s.RegisterService(&_TraceService_serviceDesc, srv) + s.RegisterService(&TraceService_ServiceDesc, srv) } func _TraceService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { @@ -87,7 +92,10 @@ func _TraceService_Export_Handler(srv interface{}, ctx context.Context, dec func return interceptor(ctx, in, info, handler) } -var _TraceService_serviceDesc = grpc.ServiceDesc{ +// TraceService_ServiceDesc is the grpc.ServiceDesc for TraceService service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var TraceService_ServiceDesc = grpc.ServiceDesc{ ServiceName: "opentelemetry.proto.collector.trace.v1.TraceService", HandlerType: (*TraceServiceServer)(nil), Methods: []grpc.MethodDesc{ diff --git a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go index de75b592ca9a3..8502e607b2541 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.23.0 -// protoc v3.13.0 +// protoc-gen-go v1.26.0 +// protoc v3.17.3 // source: opentelemetry/proto/common/v1/common.proto package v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -35,10 +34,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // AnyValue is used to represent any type of attribute value. AnyValue may contain a // primitive value such as a string or integer or it may contain an arbitrary nested // object containing arrays, key-value lists and primitives. @@ -257,6 +252,8 @@ type KeyValueList struct { // A collection of key/value pairs of key-value pairs. The list may be empty (may // contain 0 elements). + // The keys MUST be unique (it is not allowed to have more than one + // value with the same key). Values []*KeyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` } @@ -356,21 +353,22 @@ func (x *KeyValue) GetValue() *AnyValue { return nil } -// StringKeyValue is a pair of key/value strings. This is the simpler (and faster) version -// of KeyValue that only supports string values. -// -// Deprecated: Do not use. 
-type StringKeyValue struct { +// InstrumentationScope is a message representing the instrumentation scope information +// such as the fully qualified name and version. +type InstrumentationScope struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + // An empty instrumentation scope name means the name is unknown. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + Attributes []*KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` + DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` } -func (x *StringKeyValue) Reset() { - *x = StringKeyValue{} +func (x *InstrumentationScope) Reset() { + *x = InstrumentationScope{} if protoimpl.UnsafeEnabled { mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -378,13 +376,13 @@ func (x *StringKeyValue) Reset() { } } -func (x *StringKeyValue) String() string { +func (x *InstrumentationScope) String() string { return protoimpl.X.MessageStringOf(x) } -func (*StringKeyValue) ProtoMessage() {} +func (*InstrumentationScope) ProtoMessage() {} -func (x *StringKeyValue) ProtoReflect() protoreflect.Message { +func (x *InstrumentationScope) ProtoReflect() protoreflect.Message { mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -396,81 +394,37 @@ func (x *StringKeyValue) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use StringKeyValue.ProtoReflect.Descriptor instead. -func (*StringKeyValue) Descriptor() ([]byte, []int) { +// Deprecated: Use InstrumentationScope.ProtoReflect.Descriptor instead. +func (*InstrumentationScope) Descriptor() ([]byte, []int) { return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{4} } -func (x *StringKeyValue) GetKey() string { +func (x *InstrumentationScope) GetName() string { if x != nil { - return x.Key + return x.Name } return "" } -func (x *StringKeyValue) GetValue() string { +func (x *InstrumentationScope) GetVersion() string { if x != nil { - return x.Value + return x.Version } return "" } -// InstrumentationLibrary is a message representing the instrumentation library information -// such as the fully qualified name and version. -type InstrumentationLibrary struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // An empty instrumentation library name means the name is unknown. 
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` -} - -func (x *InstrumentationLibrary) Reset() { - *x = InstrumentationLibrary{} - if protoimpl.UnsafeEnabled { - mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *InstrumentationLibrary) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*InstrumentationLibrary) ProtoMessage() {} - -func (x *InstrumentationLibrary) ProtoReflect() protoreflect.Message { - mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use InstrumentationLibrary.ProtoReflect.Descriptor instead. -func (*InstrumentationLibrary) Descriptor() ([]byte, []int) { - return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{5} -} - -func (x *InstrumentationLibrary) GetName() string { +func (x *InstrumentationScope) GetAttributes() []*KeyValue { if x != nil { - return x.Name + return x.Attributes } - return "" + return nil } -func (x *InstrumentationLibrary) GetVersion() string { +func (x *InstrumentationScope) GetDroppedAttributesCount() uint32 { if x != nil { - return x.Version + return x.DroppedAttributesCount } - return "" + return 0 } var File_opentelemetry_proto_common_v1_common_proto protoreflect.FileDescriptor @@ -518,22 +472,28 @@ var file_opentelemetry_proto_common_v1_common_proto_rawDesc = []byte{ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x79, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x3c, 0x0a, 0x0e, 0x53, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x18, 0x01, 0x22, 0x46, 0x0a, 0x16, 0x49, 0x6e, 0x73, - 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x62, 0x72, - 0x61, 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x42, 0x5b, 0x0a, 0x20, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, - 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, - 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, - 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 
0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xc7, 0x01, 0x0a, 0x14, + 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x63, 0x6f, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, + 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, + 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, + 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, + 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x7b, 0x0a, 0x20, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, + 0x76, 0x31, 0xaa, 0x02, 0x1d, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -548,14 +508,13 @@ func file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP() []byte { return file_opentelemetry_proto_common_v1_common_proto_rawDescData } -var file_opentelemetry_proto_common_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_opentelemetry_proto_common_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 5) var file_opentelemetry_proto_common_v1_common_proto_goTypes = []interface{}{ - (*AnyValue)(nil), // 0: opentelemetry.proto.common.v1.AnyValue - (*ArrayValue)(nil), // 1: opentelemetry.proto.common.v1.ArrayValue - (*KeyValueList)(nil), // 2: opentelemetry.proto.common.v1.KeyValueList - (*KeyValue)(nil), // 3: opentelemetry.proto.common.v1.KeyValue - (*StringKeyValue)(nil), // 4: opentelemetry.proto.common.v1.StringKeyValue - (*InstrumentationLibrary)(nil), // 5: opentelemetry.proto.common.v1.InstrumentationLibrary + (*AnyValue)(nil), // 0: opentelemetry.proto.common.v1.AnyValue + (*ArrayValue)(nil), // 1: opentelemetry.proto.common.v1.ArrayValue + (*KeyValueList)(nil), // 2: opentelemetry.proto.common.v1.KeyValueList + (*KeyValue)(nil), // 3: opentelemetry.proto.common.v1.KeyValue + (*InstrumentationScope)(nil), // 4: opentelemetry.proto.common.v1.InstrumentationScope } var file_opentelemetry_proto_common_v1_common_proto_depIdxs = []int32{ 1, // 0: opentelemetry.proto.common.v1.AnyValue.array_value:type_name -> opentelemetry.proto.common.v1.ArrayValue @@ 
-563,11 +522,12 @@ var file_opentelemetry_proto_common_v1_common_proto_depIdxs = []int32{ 0, // 2: opentelemetry.proto.common.v1.ArrayValue.values:type_name -> opentelemetry.proto.common.v1.AnyValue 3, // 3: opentelemetry.proto.common.v1.KeyValueList.values:type_name -> opentelemetry.proto.common.v1.KeyValue 0, // 4: opentelemetry.proto.common.v1.KeyValue.value:type_name -> opentelemetry.proto.common.v1.AnyValue - 5, // [5:5] is the sub-list for method output_type - 5, // [5:5] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name + 3, // 5: opentelemetry.proto.common.v1.InstrumentationScope.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name } func init() { file_opentelemetry_proto_common_v1_common_proto_init() } @@ -625,19 +585,7 @@ func file_opentelemetry_proto_common_v1_common_proto_init() { } } file_opentelemetry_proto_common_v1_common_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StringKeyValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_opentelemetry_proto_common_v1_common_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InstrumentationLibrary); i { + switch v := v.(*InstrumentationScope); i { case 0: return &v.state case 1: @@ -664,7 +612,7 @@ func file_opentelemetry_proto_common_v1_common_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_opentelemetry_proto_common_v1_common_proto_rawDesc, NumEnums: 0, - NumMessages: 6, + NumMessages: 5, NumExtensions: 0, NumServices: 0, }, diff --git a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go index ac347acb2335b..bcc1060e3dd2e 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.23.0 -// protoc v3.13.0 +// protoc-gen-go v1.26.0 +// protoc v3.17.3 // source: opentelemetry/proto/resource/v1/resource.proto package v1 import ( - proto "github.com/golang/protobuf/proto" v1 "go.opentelemetry.io/proto/otlp/common/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" @@ -36,17 +35,15 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // Resource information. type Resource struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Set of labels that describe the resource. + // Set of attributes that describe the resource. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). 
Attributes []*v1.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"` // dropped_attributes_count is the number of dropped attributes. If the value is 0, then // no attributes were dropped. @@ -118,14 +115,16 @@ var file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = []byte{ 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x61, 0x0a, - 0x22, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, - 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x2e, 0x76, 0x31, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, - 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, - 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, 0x31, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x83, 0x01, + 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, + 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, + 0x31, 0xaa, 0x02, 0x1f, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go index abf0b4c16831b..499a43d77bb92 100644 --- a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go +++ b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go @@ -14,14 +14,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.23.0 -// protoc v3.13.0 +// protoc-gen-go v1.26.0 +// protoc v3.17.3 // source: opentelemetry/proto/trace/v1/trace.proto package v1 import ( - proto "github.com/golang/protobuf/proto" v11 "go.opentelemetry.io/proto/otlp/common/v1" v1 "go.opentelemetry.io/proto/otlp/resource/v1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -37,10 +36,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // SpanKind is the type of span. Can be used to specify additional relationships between spans // in addition to a parent/child relationship. 
type Span_SpanKind int32 @@ -232,7 +227,7 @@ func (x *TracesData) GetResourceSpans() []*ResourceSpans { return nil } -// A collection of InstrumentationLibrarySpans from a Resource. +// A collection of ScopeSpans from a Resource. type ResourceSpans struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -241,11 +236,10 @@ type ResourceSpans struct { // The resource for the spans in this message. // If this field is not set then no resource info is known. Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` - // A list of InstrumentationLibrarySpans that originate from a resource. - InstrumentationLibrarySpans []*InstrumentationLibrarySpans `protobuf:"bytes,2,rep,name=instrumentation_library_spans,json=instrumentationLibrarySpans,proto3" json:"instrumentation_library_spans,omitempty"` + // A list of ScopeSpans that originate from a resource. + ScopeSpans []*ScopeSpans `protobuf:"bytes,2,rep,name=scope_spans,json=scopeSpans,proto3" json:"scope_spans,omitempty"` // This schema_url applies to the data in the "resource" field. It does not apply - // to the data in the "instrumentation_library_spans" field which have their own - // schema_url field. + // to the data in the "scope_spans" field which have their own schema_url field. SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` } @@ -288,9 +282,9 @@ func (x *ResourceSpans) GetResource() *v1.Resource { return nil } -func (x *ResourceSpans) GetInstrumentationLibrarySpans() []*InstrumentationLibrarySpans { +func (x *ResourceSpans) GetScopeSpans() []*ScopeSpans { if x != nil { - return x.InstrumentationLibrarySpans + return x.ScopeSpans } return nil } @@ -302,24 +296,24 @@ func (x *ResourceSpans) GetSchemaUrl() string { return "" } -// A collection of Spans produced by an InstrumentationLibrary. -type InstrumentationLibrarySpans struct { +// A collection of Spans produced by an InstrumentationScope. +type ScopeSpans struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The instrumentation library information for the spans in this message. - // Semantically when InstrumentationLibrary isn't set, it is equivalent with - // an empty instrumentation library name (unknown). - InstrumentationLibrary *v11.InstrumentationLibrary `protobuf:"bytes,1,opt,name=instrumentation_library,json=instrumentationLibrary,proto3" json:"instrumentation_library,omitempty"` - // A list of Spans that originate from an instrumentation library. + // The instrumentation scope information for the spans in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + Scope *v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope,omitempty"` + // A list of Spans that originate from an instrumentation scope. Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` // This schema_url applies to all spans and span events in the "spans" field. 
SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` } -func (x *InstrumentationLibrarySpans) Reset() { - *x = InstrumentationLibrarySpans{} +func (x *ScopeSpans) Reset() { + *x = ScopeSpans{} if protoimpl.UnsafeEnabled { mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -327,13 +321,13 @@ func (x *InstrumentationLibrarySpans) Reset() { } } -func (x *InstrumentationLibrarySpans) String() string { +func (x *ScopeSpans) String() string { return protoimpl.X.MessageStringOf(x) } -func (*InstrumentationLibrarySpans) ProtoMessage() {} +func (*ScopeSpans) ProtoMessage() {} -func (x *InstrumentationLibrarySpans) ProtoReflect() protoreflect.Message { +func (x *ScopeSpans) ProtoReflect() protoreflect.Message { mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -345,39 +339,33 @@ func (x *InstrumentationLibrarySpans) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use InstrumentationLibrarySpans.ProtoReflect.Descriptor instead. -func (*InstrumentationLibrarySpans) Descriptor() ([]byte, []int) { +// Deprecated: Use ScopeSpans.ProtoReflect.Descriptor instead. +func (*ScopeSpans) Descriptor() ([]byte, []int) { return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{2} } -func (x *InstrumentationLibrarySpans) GetInstrumentationLibrary() *v11.InstrumentationLibrary { +func (x *ScopeSpans) GetScope() *v11.InstrumentationScope { if x != nil { - return x.InstrumentationLibrary + return x.Scope } return nil } -func (x *InstrumentationLibrarySpans) GetSpans() []*Span { +func (x *ScopeSpans) GetSpans() []*Span { if x != nil { return x.Spans } return nil } -func (x *InstrumentationLibrarySpans) GetSchemaUrl() string { +func (x *ScopeSpans) GetSchemaUrl() string { if x != nil { return x.SchemaUrl } return "" } -// Span represents a single operation within a trace. Spans can be -// nested to form a trace tree. Spans may also be linked to other spans -// from the same or different trace and form graphs. Often, a trace -// contains a root span that describes the end-to-end latency, and one -// or more subspans for its sub-operations. A trace can also contain -// multiple root spans, or none at all. Spans do not need to be -// contiguous - there may be gaps or overlaps between spans in a trace. +// A Span represents a single operation performed by a single component of the system. // // The next available field id is 17. type Span struct { @@ -449,7 +437,9 @@ type Span struct { // "abc.com/score": 10.239 // // The OpenTelemetry API specification further restricts the allowed value types: - // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/common.md#attributes + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). Attributes []*v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"` // dropped_attributes_count is the number of attributes that were discarded. Attributes // can be discarded because their keys are too long or because there are too many @@ -680,6 +670,8 @@ type Span_Event struct { // This field is semantically required to be set to non-empty string. 
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` // attributes is a collection of attribute key/value pairs on the event. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). Attributes []*v11.KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` // dropped_attributes_count is the number of dropped attributes. If the value is 0, // then no attributes were dropped. @@ -763,6 +755,8 @@ type Span_Link struct { // The trace_state associated with the link. TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"` // attributes is a collection of attribute key/value pairs on the link. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). Attributes []*v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty"` // dropped_attributes_count is the number of dropped attributes. If the value is 0, // then no attributes were dropped. @@ -855,137 +849,133 @@ var file_opentelemetry_proto_trace_v1_trace_proto_rawDesc = []byte{ 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x22, 0xf4, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x22, 0xc8, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, - 0x7d, 0x0a, 0x1d, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, - 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, - 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x70, 0x61, 0x6e, - 0x73, 0x52, 0x1b, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, - 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, 0x72, 0x6c, 0x22, 0xe6, 0x01, - 0x0a, 0x1b, 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x6e, 0x0a, - 0x17, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x5f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, + 0x49, 0x0a, 0x0b, 0x73, 0x63, 
0x6f, 0x70, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, + 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x52, 0x0a, + 0x73, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, 0x72, 0x6c, 0x4a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0xe9, + 0x07, 0x22, 0xb0, 0x01, 0x0a, 0x0a, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, + 0x12, 0x49, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x33, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x63, 0x6f, 0x70, 0x65, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x05, 0x73, + 0x70, 0x61, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x05, + 0x73, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, + 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x55, 0x72, 0x6c, 0x22, 0x9c, 0x0a, 0x0a, 0x04, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x19, 0x0a, + 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e, + 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, + 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x70, 0x61, + 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x04, + 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x53, + 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x2f, 0x0a, + 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, + 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28, 0x06, 0x52, 0x11, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x2b, + 0x0a, 0x12, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, + 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x08, 0x20, 0x01, 0x28, 
0x06, 0x52, 0x0f, 0x65, 0x6e, 0x64, 0x54, + 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x47, 0x0a, 0x0a, 0x61, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, + 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, + 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x40, + 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x49, - 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, - 0x62, 0x72, 0x61, 0x72, 0x79, 0x52, 0x16, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x12, 0x38, 0x0a, - 0x05, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, - 0x52, 0x05, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x55, 0x72, 0x6c, 0x22, 0x9c, 0x0a, 0x0a, 0x04, 0x53, 0x70, 0x61, 0x6e, 0x12, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, + 0x61, 0x6e, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x12, 0x30, 0x0a, 0x14, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, + 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, + 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, + 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x6e, + 0x6b, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, + 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x69, 0x6e, 0x6b, 0x73, 0x43, 0x6f, 0x75, 0x6e, + 0x74, 0x12, 0x3c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 
0x2e, 0x76, 0x31, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, + 0xc4, 0x01, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, + 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, + 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, + 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0xde, 0x01, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, - 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, - 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x6f, - 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, - 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, - 0x2f, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, - 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28, 0x06, 0x52, 0x11, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, - 0x12, 0x2b, 0x0a, 0x12, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, - 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x08, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0f, 0x65, 0x6e, - 0x64, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x47, 0x0a, - 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, - 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, - 0x31, 0x2e, 
0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, - 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, - 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, - 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x12, 0x40, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x12, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x43, - 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x0d, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, - 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, - 0x6e, 0x6b, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6c, - 0x69, 0x6e, 0x6b, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x11, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x69, 0x6e, 0x6b, 0x73, 0x43, 0x6f, - 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x0f, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, - 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x1a, 0xc4, 0x01, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x74, - 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, - 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, - 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0xde, 0x01, 0x0a, 0x04, 0x4c, 0x69, 0x6e, - 0x6b, 0x12, 0x19, 0x0a, 0x08, 0x74, 
0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, - 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, - 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x63, - 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, - 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, - 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, - 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x99, 0x01, 0x0a, 0x08, 0x53, 0x70, - 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, - 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, - 0x00, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x49, - 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41, - 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x12, - 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4c, 0x49, - 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, - 0x4e, 0x44, 0x5f, 0x50, 0x52, 0x4f, 0x44, 0x55, 0x43, 0x45, 0x52, 0x10, 0x04, 0x12, 0x16, 0x0a, - 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x53, 0x55, - 0x4d, 0x45, 0x52, 0x10, 0x05, 0x22, 0xbd, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x63, 0x6f, - 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, - 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, - 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x22, - 0x4e, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x15, 0x0a, - 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, - 0x45, 0x54, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, - 0x4f, 0x44, 0x45, 0x5f, 0x4f, 0x4b, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, - 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x4a, - 0x04, 0x08, 0x01, 0x10, 0x02, 0x42, 0x58, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, - 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, + 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x99, 0x01, 0x0a, 0x08, 0x53, 0x70, 0x61, 0x6e, + 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, + 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x49, 0x4e, 0x54, + 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41, 0x4e, 0x5f, + 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x12, 0x14, 0x0a, + 0x10, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, + 0x54, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, + 0x5f, 0x50, 0x52, 0x4f, 0x44, 0x55, 0x43, 0x45, 0x52, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x53, + 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x53, 0x55, 0x4d, 0x45, + 0x52, 0x10, 0x05, 0x22, 0xbd, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, + 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, + 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x22, 0x4e, 0x0a, + 0x0a, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, + 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x45, 0x54, + 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, + 0x45, 0x5f, 0x4f, 0x4b, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, + 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x4a, 0x04, 0x08, + 0x01, 0x10, 0x02, 0x42, 0x77, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, + 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1c, + 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1003,25 +993,25 @@ func file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP() []byte { var file_opentelemetry_proto_trace_v1_trace_proto_enumTypes = make([]protoimpl.EnumInfo, 2) var file_opentelemetry_proto_trace_v1_trace_proto_msgTypes = make([]protoimpl.MessageInfo, 7) var file_opentelemetry_proto_trace_v1_trace_proto_goTypes = []interface{}{ - (Span_SpanKind)(0), // 0: opentelemetry.proto.trace.v1.Span.SpanKind - (Status_StatusCode)(0), // 1: opentelemetry.proto.trace.v1.Status.StatusCode - (*TracesData)(nil), // 2: opentelemetry.proto.trace.v1.TracesData - (*ResourceSpans)(nil), // 3: opentelemetry.proto.trace.v1.ResourceSpans - (*InstrumentationLibrarySpans)(nil), // 4: opentelemetry.proto.trace.v1.InstrumentationLibrarySpans - (*Span)(nil), // 5: opentelemetry.proto.trace.v1.Span - (*Status)(nil), // 6: opentelemetry.proto.trace.v1.Status - (*Span_Event)(nil), // 7: opentelemetry.proto.trace.v1.Span.Event - (*Span_Link)(nil), // 8: opentelemetry.proto.trace.v1.Span.Link - (*v1.Resource)(nil), // 9: opentelemetry.proto.resource.v1.Resource - (*v11.InstrumentationLibrary)(nil), // 10: opentelemetry.proto.common.v1.InstrumentationLibrary - (*v11.KeyValue)(nil), // 11: opentelemetry.proto.common.v1.KeyValue + (Span_SpanKind)(0), // 0: opentelemetry.proto.trace.v1.Span.SpanKind + (Status_StatusCode)(0), // 1: opentelemetry.proto.trace.v1.Status.StatusCode + (*TracesData)(nil), // 2: opentelemetry.proto.trace.v1.TracesData + (*ResourceSpans)(nil), // 3: opentelemetry.proto.trace.v1.ResourceSpans + (*ScopeSpans)(nil), // 4: opentelemetry.proto.trace.v1.ScopeSpans + (*Span)(nil), // 5: opentelemetry.proto.trace.v1.Span + (*Status)(nil), // 6: opentelemetry.proto.trace.v1.Status + (*Span_Event)(nil), // 7: opentelemetry.proto.trace.v1.Span.Event + (*Span_Link)(nil), // 8: opentelemetry.proto.trace.v1.Span.Link + (*v1.Resource)(nil), // 9: opentelemetry.proto.resource.v1.Resource + (*v11.InstrumentationScope)(nil), // 10: opentelemetry.proto.common.v1.InstrumentationScope + (*v11.KeyValue)(nil), // 11: opentelemetry.proto.common.v1.KeyValue } var file_opentelemetry_proto_trace_v1_trace_proto_depIdxs = []int32{ 3, // 0: opentelemetry.proto.trace.v1.TracesData.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans 9, // 1: opentelemetry.proto.trace.v1.ResourceSpans.resource:type_name -> opentelemetry.proto.resource.v1.Resource - 4, // 2: opentelemetry.proto.trace.v1.ResourceSpans.instrumentation_library_spans:type_name -> opentelemetry.proto.trace.v1.InstrumentationLibrarySpans - 10, // 3: opentelemetry.proto.trace.v1.InstrumentationLibrarySpans.instrumentation_library:type_name -> opentelemetry.proto.common.v1.InstrumentationLibrary - 5, // 4: opentelemetry.proto.trace.v1.InstrumentationLibrarySpans.spans:type_name -> opentelemetry.proto.trace.v1.Span + 4, // 2: 
opentelemetry.proto.trace.v1.ResourceSpans.scope_spans:type_name -> opentelemetry.proto.trace.v1.ScopeSpans + 10, // 3: opentelemetry.proto.trace.v1.ScopeSpans.scope:type_name -> opentelemetry.proto.common.v1.InstrumentationScope + 5, // 4: opentelemetry.proto.trace.v1.ScopeSpans.spans:type_name -> opentelemetry.proto.trace.v1.Span 0, // 5: opentelemetry.proto.trace.v1.Span.kind:type_name -> opentelemetry.proto.trace.v1.Span.SpanKind 11, // 6: opentelemetry.proto.trace.v1.Span.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue 7, // 7: opentelemetry.proto.trace.v1.Span.events:type_name -> opentelemetry.proto.trace.v1.Span.Event @@ -1068,7 +1058,7 @@ func file_opentelemetry_proto_trace_v1_trace_proto_init() { } } file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*InstrumentationLibrarySpans); i { + switch v := v.(*ScopeSpans); i { case 0: return &v.state case 1: diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE new file mode 100644 index 0000000000000..6a66aea5eafe0 --- /dev/null +++ b/vendor/golang.org/x/mod/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/mod/PATENTS b/vendor/golang.org/x/mod/PATENTS new file mode 100644 index 0000000000000..733099041f84f --- /dev/null +++ b/vendor/golang.org/x/mod/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. 
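The trace and common hunks above complete the OTLP rename from InstrumentationLibrarySpans/InstrumentationLibrary to ScopeSpans/InstrumentationScope. A minimal sketch of how consuming code walks the regenerated types, using only the accessors visible in this diff; countSpans and the literal values are illustrative, not taken from the daemon:

// Sketch only: walking the regenerated OTLP trace types after the
// InstrumentationLibrarySpans -> ScopeSpans rename shown in the hunks above.
// The import paths are the vendored go.opentelemetry.io/proto/otlp packages;
// countSpans and the literal values are illustrative, not daemon code.
package main

import (
	"fmt"

	commonpb "go.opentelemetry.io/proto/otlp/common/v1"
	tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
)

// countSpans uses the new accessors (GetScopeSpans, GetScope) in place of the
// removed GetInstrumentationLibrarySpans and GetInstrumentationLibrary.
func countSpans(td *tracepb.TracesData) int {
	n := 0
	for _, rs := range td.GetResourceSpans() {
		for _, ss := range rs.GetScopeSpans() {
			n += len(ss.GetSpans())
		}
	}
	return n
}

func main() {
	td := &tracepb.TracesData{
		ResourceSpans: []*tracepb.ResourceSpans{{
			ScopeSpans: []*tracepb.ScopeSpans{{
				// Scope replaces the old InstrumentationLibrary field.
				Scope: &commonpb.InstrumentationScope{Name: "example"},
				Spans: []*tracepb.Span{{Name: "op"}},
			}},
		}},
	}
	fmt.Println(countSpans(td)) // 1
}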
This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/mod/semver/semver.go b/vendor/golang.org/x/mod/semver/semver.go new file mode 100644 index 0000000000000..a30a22bf20f18 --- /dev/null +++ b/vendor/golang.org/x/mod/semver/semver.go @@ -0,0 +1,401 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package semver implements comparison of semantic version strings. +// In this package, semantic version strings must begin with a leading "v", +// as in "v1.0.0". +// +// The general form of a semantic version string accepted by this package is +// +// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]] +// +// where square brackets indicate optional parts of the syntax; +// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros; +// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers +// using only alphanumeric characters and hyphens; and +// all-numeric PRERELEASE identifiers must not have leading zeros. +// +// This package follows Semantic Versioning 2.0.0 (see semver.org) +// with two exceptions. First, it requires the "v" prefix. Second, it recognizes +// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes) +// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0. +package semver + +import "sort" + +// parsed returns the parsed form of a semantic version string. +type parsed struct { + major string + minor string + patch string + short string + prerelease string + build string +} + +// IsValid reports whether v is a valid semantic version string. +func IsValid(v string) bool { + _, ok := parse(v) + return ok +} + +// Canonical returns the canonical formatting of the semantic version v. +// It fills in any missing .MINOR or .PATCH and discards build metadata. +// Two semantic versions compare equal only if their canonical formattings +// are identical strings. +// The canonical invalid semantic version is the empty string. +func Canonical(v string) string { + p, ok := parse(v) + if !ok { + return "" + } + if p.build != "" { + return v[:len(v)-len(p.build)] + } + if p.short != "" { + return v + p.short + } + return v +} + +// Major returns the major version prefix of the semantic version v. +// For example, Major("v2.1.0") == "v2". +// If v is an invalid semantic version string, Major returns the empty string. +func Major(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return v[:1+len(pv.major)] +} + +// MajorMinor returns the major.minor version prefix of the semantic version v. +// For example, MajorMinor("v2.1.0") == "v2.1". +// If v is an invalid semantic version string, MajorMinor returns the empty string. +func MajorMinor(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + i := 1 + len(pv.major) + if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' 
&& v[i+1:j] == pv.minor { + return v[:j] + } + return v[:i] + "." + pv.minor +} + +// Prerelease returns the prerelease suffix of the semantic version v. +// For example, Prerelease("v2.1.0-pre+meta") == "-pre". +// If v is an invalid semantic version string, Prerelease returns the empty string. +func Prerelease(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.prerelease +} + +// Build returns the build suffix of the semantic version v. +// For example, Build("v2.1.0+meta") == "+meta". +// If v is an invalid semantic version string, Build returns the empty string. +func Build(v string) string { + pv, ok := parse(v) + if !ok { + return "" + } + return pv.build +} + +// Compare returns an integer comparing two versions according to +// semantic version precedence. +// The result will be 0 if v == w, -1 if v < w, or +1 if v > w. +// +// An invalid semantic version string is considered less than a valid one. +// All invalid semantic version strings compare equal to each other. +func Compare(v, w string) int { + pv, ok1 := parse(v) + pw, ok2 := parse(w) + if !ok1 && !ok2 { + return 0 + } + if !ok1 { + return -1 + } + if !ok2 { + return +1 + } + if c := compareInt(pv.major, pw.major); c != 0 { + return c + } + if c := compareInt(pv.minor, pw.minor); c != 0 { + return c + } + if c := compareInt(pv.patch, pw.patch); c != 0 { + return c + } + return comparePrerelease(pv.prerelease, pw.prerelease) +} + +// Max canonicalizes its arguments and then returns the version string +// that compares greater. +// +// Deprecated: use Compare instead. In most cases, returning a canonicalized +// version is not expected or desired. +func Max(v, w string) string { + v = Canonical(v) + w = Canonical(w) + if Compare(v, w) > 0 { + return v + } + return w +} + +// ByVersion implements sort.Interface for sorting semantic version strings. +type ByVersion []string + +func (vs ByVersion) Len() int { return len(vs) } +func (vs ByVersion) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } +func (vs ByVersion) Less(i, j int) bool { + cmp := Compare(vs[i], vs[j]) + if cmp != 0 { + return cmp < 0 + } + return vs[i] < vs[j] +} + +// Sort sorts a list of semantic version strings using ByVersion. +func Sort(list []string) { + sort.Sort(ByVersion(list)) +} + +func parse(v string) (p parsed, ok bool) { + if v == "" || v[0] != 'v' { + return + } + p.major, v, ok = parseInt(v[1:]) + if !ok { + return + } + if v == "" { + p.minor = "0" + p.patch = "0" + p.short = ".0.0" + return + } + if v[0] != '.' { + ok = false + return + } + p.minor, v, ok = parseInt(v[1:]) + if !ok { + return + } + if v == "" { + p.patch = "0" + p.short = ".0" + return + } + if v[0] != '.' { + ok = false + return + } + p.patch, v, ok = parseInt(v[1:]) + if !ok { + return + } + if len(v) > 0 && v[0] == '-' { + p.prerelease, v, ok = parsePrerelease(v) + if !ok { + return + } + } + if len(v) > 0 && v[0] == '+' { + p.build, v, ok = parseBuild(v) + if !ok { + return + } + } + if v != "" { + ok = false + return + } + ok = true + return +} + +func parseInt(v string) (t, rest string, ok bool) { + if v == "" { + return + } + if v[0] < '0' || '9' < v[0] { + return + } + i := 1 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + if v[0] == '0' && i != 1 { + return + } + return v[:i], v[i:], true +} + +func parsePrerelease(v string) (t, rest string, ok bool) { + // "A pre-release version MAY be denoted by appending a hyphen and + // a series of dot separated identifiers immediately following the patch version. 
+ // Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-]. + // Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes." + if v == "" || v[0] != '-' { + return + } + i := 1 + start := 1 + for i < len(v) && v[i] != '+' { + if !isIdentChar(v[i]) && v[i] != '.' { + return + } + if v[i] == '.' { + if start == i || isBadNum(v[start:i]) { + return + } + start = i + 1 + } + i++ + } + if start == i || isBadNum(v[start:i]) { + return + } + return v[:i], v[i:], true +} + +func parseBuild(v string) (t, rest string, ok bool) { + if v == "" || v[0] != '+' { + return + } + i := 1 + start := 1 + for i < len(v) { + if !isIdentChar(v[i]) && v[i] != '.' { + return + } + if v[i] == '.' { + if start == i { + return + } + start = i + 1 + } + i++ + } + if start == i { + return + } + return v[:i], v[i:], true +} + +func isIdentChar(c byte) bool { + return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-' +} + +func isBadNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) && i > 1 && v[0] == '0' +} + +func isNum(v string) bool { + i := 0 + for i < len(v) && '0' <= v[i] && v[i] <= '9' { + i++ + } + return i == len(v) +} + +func compareInt(x, y string) int { + if x == y { + return 0 + } + if len(x) < len(y) { + return -1 + } + if len(x) > len(y) { + return +1 + } + if x < y { + return -1 + } else { + return +1 + } +} + +func comparePrerelease(x, y string) int { + // "When major, minor, and patch are equal, a pre-release version has + // lower precedence than a normal version. + // Example: 1.0.0-alpha < 1.0.0. + // Precedence for two pre-release versions with the same major, minor, + // and patch version MUST be determined by comparing each dot separated + // identifier from left to right until a difference is found as follows: + // identifiers consisting of only digits are compared numerically and + // identifiers with letters or hyphens are compared lexically in ASCII + // sort order. Numeric identifiers always have lower precedence than + // non-numeric identifiers. A larger set of pre-release fields has a + // higher precedence than a smaller set, if all of the preceding + // identifiers are equal. + // Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta < + // 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0." + if x == y { + return 0 + } + if x == "" { + return +1 + } + if y == "" { + return -1 + } + for x != "" && y != "" { + x = x[1:] // skip - or . + y = y[1:] // skip - or . + var dx, dy string + dx, x = nextIdent(x) + dy, y = nextIdent(y) + if dx != dy { + ix := isNum(dx) + iy := isNum(dy) + if ix != iy { + if ix { + return -1 + } else { + return +1 + } + } + if ix { + if len(dx) < len(dy) { + return -1 + } + if len(dx) > len(dy) { + return +1 + } + } + if dx < dy { + return -1 + } else { + return +1 + } + } + } + if x == "" { + return -1 + } else { + return +1 + } +} + +func nextIdent(x string) (dx, rest string) { + i := 0 + for i < len(x) && x[i] != '.' { + i++ + } + return x[:i], x[i:] +} diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go index e917195d53af4..2bf3202b290f1 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/aws.go @@ -62,6 +62,13 @@ const ( // The AWS authorization header name for the auto-generated date. 
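The newly vendored golang.org/x/mod/semver package above compares "v"-prefixed semantic version strings. A minimal usage sketch of the exported API documented in the package; how the daemon itself calls the package is not shown in this diff, so the values below are illustrative only:

// Sketch only: exercising the vendored golang.org/x/mod/semver API added above.
package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	fmt.Println(semver.IsValid("1.2.3"))             // false: the leading "v" is required
	fmt.Println(semver.Canonical("v1.2"))            // "v1.2.0": missing .PATCH is filled in
	fmt.Println(semver.Compare("v1.2.3", "v1.10"))   // -1: numeric fields compare numerically
	fmt.Println(semver.Prerelease("v1.0.0-rc.1+b5")) // "-rc.1": build metadata is excluded

	// Sort orders by semver precedence, so a prerelease sorts before its release.
	vs := []string{"v1.10.0", "v1.2.0", "v1.2.0-alpha"}
	semver.Sort(vs)
	fmt.Println(vs) // [v1.2.0-alpha v1.2.0 v1.10.0]
}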
awsDateHeader = "x-amz-date" + // Supported AWS configuration environment variables. + awsAccessKeyId = "AWS_ACCESS_KEY_ID" + awsDefaultRegion = "AWS_DEFAULT_REGION" + awsRegion = "AWS_REGION" + awsSecretAccessKey = "AWS_SECRET_ACCESS_KEY" + awsSessionToken = "AWS_SESSION_TOKEN" + awsTimeFormatLong = "20060102T150405Z" awsTimeFormatShort = "20060102" ) @@ -267,6 +274,49 @@ type awsRequest struct { Headers []awsRequestHeader `json:"headers"` } +func (cs awsCredentialSource) validateMetadataServers() error { + if err := cs.validateMetadataServer(cs.RegionURL, "region_url"); err != nil { + return err + } + if err := cs.validateMetadataServer(cs.CredVerificationURL, "url"); err != nil { + return err + } + return cs.validateMetadataServer(cs.IMDSv2SessionTokenURL, "imdsv2_session_token_url") +} + +var validHostnames []string = []string{"169.254.169.254", "fd00:ec2::254"} + +func (cs awsCredentialSource) isValidMetadataServer(metadataUrl string) bool { + if metadataUrl == "" { + // Zero value means use default, which is valid. + return true + } + + u, err := url.Parse(metadataUrl) + if err != nil { + // Unparseable URL means invalid + return false + } + + for _, validHostname := range validHostnames { + if u.Hostname() == validHostname { + // If it's one of the valid hostnames, everything is good + return true + } + } + + // hostname not found in our allowlist, so not valid + return false +} + +func (cs awsCredentialSource) validateMetadataServer(metadataUrl, urlName string) error { + if !cs.isValidMetadataServer(metadataUrl) { + return fmt.Errorf("oauth2/google: invalid hostname %s for %s", metadataUrl, urlName) + } + + return nil +} + func (cs awsCredentialSource) doRequest(req *http.Request) (*http.Response, error) { if cs.client == nil { cs.client = oauth2.NewClient(cs.ctx, nil) @@ -274,16 +324,33 @@ func (cs awsCredentialSource) doRequest(req *http.Request) (*http.Response, erro return cs.client.Do(req.WithContext(cs.ctx)) } +func canRetrieveRegionFromEnvironment() bool { + // The AWS region can be provided through AWS_REGION or AWS_DEFAULT_REGION. Only one is + // required. + return getenv(awsRegion) != "" || getenv(awsDefaultRegion) != "" +} + +func canRetrieveSecurityCredentialFromEnvironment() bool { + // Check if both AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are available. 
+ return getenv(awsAccessKeyId) != "" && getenv(awsSecretAccessKey) != "" +} + +func shouldUseMetadataServer() bool { + return !canRetrieveRegionFromEnvironment() || !canRetrieveSecurityCredentialFromEnvironment() +} + func (cs awsCredentialSource) subjectToken() (string, error) { if cs.requestSigner == nil { - awsSessionToken, err := cs.getAWSSessionToken() - if err != nil { - return "", err - } - headers := make(map[string]string) - if awsSessionToken != "" { - headers[awsIMDSv2SessionTokenHeader] = awsSessionToken + if shouldUseMetadataServer() { + awsSessionToken, err := cs.getAWSSessionToken() + if err != nil { + return "", err + } + + if awsSessionToken != "" { + headers[awsIMDSv2SessionTokenHeader] = awsSessionToken + } } awsSecurityCredentials, err := cs.getSecurityCredentials(headers) @@ -389,11 +456,11 @@ func (cs *awsCredentialSource) getAWSSessionToken() (string, error) { } func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, error) { - if envAwsRegion := getenv("AWS_REGION"); envAwsRegion != "" { - return envAwsRegion, nil - } - if envAwsRegion := getenv("AWS_DEFAULT_REGION"); envAwsRegion != "" { - return envAwsRegion, nil + if canRetrieveRegionFromEnvironment() { + if envAwsRegion := getenv(awsRegion); envAwsRegion != "" { + return envAwsRegion, nil + } + return getenv("AWS_DEFAULT_REGION"), nil } if cs.RegionURL == "" { @@ -434,14 +501,12 @@ func (cs *awsCredentialSource) getRegion(headers map[string]string) (string, err } func (cs *awsCredentialSource) getSecurityCredentials(headers map[string]string) (result awsSecurityCredentials, err error) { - if accessKeyID := getenv("AWS_ACCESS_KEY_ID"); accessKeyID != "" { - if secretAccessKey := getenv("AWS_SECRET_ACCESS_KEY"); secretAccessKey != "" { - return awsSecurityCredentials{ - AccessKeyID: accessKeyID, - SecretAccessKey: secretAccessKey, - SecurityToken: getenv("AWS_SESSION_TOKEN"), - }, nil - } + if canRetrieveSecurityCredentialFromEnvironment() { + return awsSecurityCredentials{ + AccessKeyID: getenv(awsAccessKeyId), + SecretAccessKey: getenv(awsSecretAccessKey), + SecurityToken: getenv(awsSessionToken), + }, nil } roleName, err := cs.getMetadataRoleName(headers) diff --git a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go index 9fc35535e7fe3..3eab8df7ced77 100644 --- a/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go +++ b/vendor/golang.org/x/oauth2/google/internal/externalaccount/basecredentials.go @@ -213,6 +213,10 @@ func (c *Config) parse(ctx context.Context) (baseCredentialSource, error) { awsCredSource.IMDSv2SessionTokenURL = c.CredentialSource.IMDSv2SessionTokenURL } + if err := awsCredSource.validateMetadataServers(); err != nil { + return nil, err + } + return awsCredSource, nil } } else if c.CredentialSource.File != "" { diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index 355c386961dd4..b4723fcacea76 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -19,8 +19,6 @@ import ( "strings" "sync" "time" - - "golang.org/x/net/context/ctxhttp" ) // Token represents the credentials used to authorize @@ -229,7 +227,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, } func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { - r, err := ctxhttp.Do(ctx, ContextClient(ctx), req) + r, err 
:= ContextClient(ctx).Do(req.WithContext(ctx)) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/sys/cpu/hwcap_linux.go b/vendor/golang.org/x/sys/cpu/hwcap_linux.go index f3baa379328fe..1d9d91f3ed81a 100644 --- a/vendor/golang.org/x/sys/cpu/hwcap_linux.go +++ b/vendor/golang.org/x/sys/cpu/hwcap_linux.go @@ -24,6 +24,21 @@ var hwCap uint var hwCap2 uint func readHWCAP() error { + // For Go 1.21+, get auxv from the Go runtime. + if a := getAuxv(); len(a) > 0 { + for len(a) >= 2 { + tag, val := a[0], uint(a[1]) + a = a[2:] + switch tag { + case _AT_HWCAP: + hwCap = val + case _AT_HWCAP2: + hwCap2 = val + } + } + return nil + } + buf, err := ioutil.ReadFile(procAuxv) if err != nil { // e.g. on android /proc/self/auxv is not accessible, so silently diff --git a/vendor/golang.org/x/sys/cpu/runtime_auxv.go b/vendor/golang.org/x/sys/cpu/runtime_auxv.go new file mode 100644 index 0000000000000..5f92ac9a2e2b9 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/runtime_auxv.go @@ -0,0 +1,16 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +// getAuxvFn is non-nil on Go 1.21+ (via runtime_auxv_go121.go init) +// on platforms that use auxv. +var getAuxvFn func() []uintptr + +func getAuxv() []uintptr { + if getAuxvFn == nil { + return nil + } + return getAuxvFn() +} diff --git a/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go b/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go new file mode 100644 index 0000000000000..b975ea2a04ef2 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/runtime_auxv_go121.go @@ -0,0 +1,19 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 +// +build go1.21 + +package cpu + +import ( + _ "unsafe" // for linkname +) + +//go:linkname runtime_getAuxv runtime.getAuxv +func runtime_getAuxv() []uintptr + +func init() { + getAuxvFn = runtime_getAuxv +} diff --git a/vendor/golang.org/x/sys/execabs/execabs.go b/vendor/golang.org/x/sys/execabs/execabs.go index b981cfbb4ae3f..3bf40fdfecd56 100644 --- a/vendor/golang.org/x/sys/execabs/execabs.go +++ b/vendor/golang.org/x/sys/execabs/execabs.go @@ -63,7 +63,7 @@ func LookPath(file string) (string, error) { } func fixCmd(name string, cmd *exec.Cmd) { - if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) { + if filepath.Base(name) == name && !filepath.IsAbs(cmd.Path) && !isGo119ErrFieldSet(cmd) { // exec.Command was called with a bare binary name and // exec.LookPath returned a path which is not absolute. 
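// Illustrative sketch, not part of the vendored diff: the new runtime_auxv.go
// and runtime_auxv_go121.go files above follow a common pattern for
// version-gated functionality - a package-level function variable that stays
// nil unless a build-tag-guarded sibling file installs an implementation in
// init(). The package, file and function names below (feature, fastPathFn,
// Values) are hypothetical; the vendored code installs runtime.getAuxv via
// //go:linkname, for which a plain stub stands in here. Two files, one package:

// feature.go - always compiled.
package feature

// fastPathFn is non-nil only when a newer toolchain provides the fast path.
var fastPathFn func() []uintptr

// Values uses the fast path when available; callers fall back (for example,
// to reading /proc/self/auxv) when it returns nil.
func Values() []uintptr {
	if fastPathFn == nil {
		return nil
	}
	return fastPathFn()
}

// feature_go121.go - compiled only on Go 1.21 and newer.

//go:build go1.21

package feature

func init() {
	// The vendored file assigns a //go:linkname alias of runtime.getAuxv
	// here; a stub keeps this sketch self-contained.
	fastPathFn = func() []uintptr { return []uintptr{} }
}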
// Set cmd.lookPathErr and clear cmd.Path so that it diff --git a/vendor/golang.org/x/sys/execabs/execabs_go118.go b/vendor/golang.org/x/sys/execabs/execabs_go118.go index 6ab5f50894e22..2000064a8124c 100644 --- a/vendor/golang.org/x/sys/execabs/execabs_go118.go +++ b/vendor/golang.org/x/sys/execabs/execabs_go118.go @@ -7,6 +7,12 @@ package execabs +import "os/exec" + func isGo119ErrDot(err error) bool { return false } + +func isGo119ErrFieldSet(cmd *exec.Cmd) bool { + return false +} diff --git a/vendor/golang.org/x/sys/execabs/execabs_go119.go b/vendor/golang.org/x/sys/execabs/execabs_go119.go index 46c5b525e7b8f..f364b3418926a 100644 --- a/vendor/golang.org/x/sys/execabs/execabs_go119.go +++ b/vendor/golang.org/x/sys/execabs/execabs_go119.go @@ -15,3 +15,7 @@ import ( func isGo119ErrDot(err error) bool { return errors.Is(err, exec.ErrDot) } + +func isGo119ErrFieldSet(cmd *exec.Cmd) bool { + return cmd.Err != nil +} diff --git a/vendor/golang.org/x/sys/unix/ioctl.go b/vendor/golang.org/x/sys/unix/ioctl.go index 1c51b0ec2bcdd..7ce8dd406fff5 100644 --- a/vendor/golang.org/x/sys/unix/ioctl.go +++ b/vendor/golang.org/x/sys/unix/ioctl.go @@ -8,7 +8,6 @@ package unix import ( - "runtime" "unsafe" ) @@ -27,7 +26,7 @@ func IoctlSetInt(fd int, req uint, value int) error { // passing the integer value directly. func IoctlSetPointerInt(fd int, req uint, value int) error { v := int32(value) - return ioctl(fd, req, uintptr(unsafe.Pointer(&v))) + return ioctlPtr(fd, req, unsafe.Pointer(&v)) } // IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. @@ -36,9 +35,7 @@ func IoctlSetPointerInt(fd int, req uint, value int) error { func IoctlSetWinsize(fd int, req uint, value *Winsize) error { // TODO: if we get the chance, remove the req parameter and // hardcode TIOCSWINSZ. - err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err + return ioctlPtr(fd, req, unsafe.Pointer(value)) } // IoctlSetTermios performs an ioctl on fd with a *Termios. @@ -46,9 +43,7 @@ func IoctlSetWinsize(fd int, req uint, value *Winsize) error { // The req value will usually be TCSETA or TIOCSETA. func IoctlSetTermios(fd int, req uint, value *Termios) error { // TODO: if we get the chance, remove the req parameter. - err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err + return ioctlPtr(fd, req, unsafe.Pointer(value)) } // IoctlGetInt performs an ioctl operation which gets an integer value @@ -58,18 +53,18 @@ func IoctlSetTermios(fd int, req uint, value *Termios) error { // for those, IoctlRetInt should be used instead of this function. 
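// Illustrative sketch, not part of the vendored diff: why the ioctl helpers
// above can drop runtime.KeepAlive. The old form converted the pointer to a
// uintptr in the caller, leaving a bare integer the garbage collector no
// longer tracks, so KeepAlive had to pin the referent; the new ioctlPtr form
// keeps the argument an unsafe.Pointer until the syscall.Syscall call
// expression itself, which is the documented safe conversion pattern and
// needs no KeepAlive. ioctlPtrSketch and setIntViaIoctl are hypothetical
// stand-ins for the generated wrappers; Linux-only for simplicity.

//go:build linux

package main

import (
	"syscall"
	"unsafe"
)

// ioctlPtrSketch mirrors the shape of the generated ioctlPtr wrappers: the
// uintptr conversion happens inside the Syscall argument list, so the pointer
// is still treated as live for the duration of the call.
func ioctlPtrSketch(fd int, req uint, arg unsafe.Pointer) error {
	_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
	if errno != 0 {
		return errno
	}
	return nil
}

// setIntViaIoctl shows a caller: &v stays reachable for the whole call
// without an explicit runtime.KeepAlive.
func setIntViaIoctl(fd int, req uint, value int) error {
	v := int32(value)
	return ioctlPtrSketch(fd, req, unsafe.Pointer(&v))
}

func main() { _ = setIntViaIoctl }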
func IoctlGetInt(fd int, req uint) (int, error) { var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return value, err } func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err } func IoctlGetTermios(fd int, req uint) (*Termios, error) { var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err } diff --git a/vendor/golang.org/x/sys/unix/ioctl_zos.go b/vendor/golang.org/x/sys/unix/ioctl_zos.go index 5384e7d91d798..6532f09af2e33 100644 --- a/vendor/golang.org/x/sys/unix/ioctl_zos.go +++ b/vendor/golang.org/x/sys/unix/ioctl_zos.go @@ -27,9 +27,7 @@ func IoctlSetInt(fd int, req uint, value int) error { func IoctlSetWinsize(fd int, req uint, value *Winsize) error { // TODO: if we get the chance, remove the req parameter and // hardcode TIOCSWINSZ. - err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) - runtime.KeepAlive(value) - return err + return ioctlPtr(fd, req, unsafe.Pointer(value)) } // IoctlSetTermios performs an ioctl on fd with a *Termios. @@ -51,13 +49,13 @@ func IoctlSetTermios(fd int, req uint, value *Termios) error { // for those, IoctlRetInt should be used instead of this function. func IoctlGetInt(fd int, req uint) (int, error) { var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return value, err } func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + err := ioctlPtr(fd, req, unsafe.Pointer(&value)) return &value, err } diff --git a/vendor/golang.org/x/sys/unix/ptrace_darwin.go b/vendor/golang.org/x/sys/unix/ptrace_darwin.go index 463c3eff7fd27..39dba6ca6a34b 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_darwin.go +++ b/vendor/golang.org/x/sys/unix/ptrace_darwin.go @@ -7,6 +7,12 @@ package unix +import "unsafe" + func ptrace(request int, pid int, addr uintptr, data uintptr) error { return ptrace1(request, pid, addr, data) } + +func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) error { + return ptrace1Ptr(request, pid, addr, data) +} diff --git a/vendor/golang.org/x/sys/unix/ptrace_ios.go b/vendor/golang.org/x/sys/unix/ptrace_ios.go index ed0509a0117c4..9ea66330a9688 100644 --- a/vendor/golang.org/x/sys/unix/ptrace_ios.go +++ b/vendor/golang.org/x/sys/unix/ptrace_ios.go @@ -7,6 +7,12 @@ package unix +import "unsafe" + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { return ENOTSUP } + +func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) { + return ENOTSUP +} diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 2db1b51e99f04..d9f5544ccf454 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -292,9 +292,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { break } } - - bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] - sa.Name = string(bytes) + sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n)) return sa, nil case AF_INET: @@ -411,6 +409,7 @@ func (w WaitStatus) CoreDump() bool { return w&0x80 == 0x80 } func (w WaitStatus) TrapCause() int { return -1 } //sys ioctl(fd int, req 
uint, arg uintptr) (err error) +//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = ioctl // fcntl must never be called with cmd=F_DUP2FD because it doesn't work on AIX // There is no way to create a custom fcntl and to keep //sys fcntl easily, diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index eda42671f1954..7705c3270b5ec 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -245,8 +245,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { break } } - bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] - sa.Name = string(bytes) + sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n)) return sa, nil case AF_INET: diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 192b071b3d0e4..7064d6ebab6a8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -14,7 +14,6 @@ package unix import ( "fmt" - "runtime" "syscall" "unsafe" ) @@ -376,11 +375,10 @@ func Flistxattr(fd int, dest []byte) (sz int, err error) { func Kill(pid int, signum syscall.Signal) (err error) { return kill(pid, int(signum), 1) } //sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL func IoctlCtlInfo(fd int, ctlInfo *CtlInfo) error { - err := ioctl(fd, CTLIOCGINFO, uintptr(unsafe.Pointer(ctlInfo))) - runtime.KeepAlive(ctlInfo) - return err + return ioctlPtr(fd, CTLIOCGINFO, unsafe.Pointer(ctlInfo)) } // IfreqMTU is struct ifreq used to get or set a network device's MTU. @@ -394,16 +392,14 @@ type IfreqMTU struct { func IoctlGetIfreqMTU(fd int, ifname string) (*IfreqMTU, error) { var ifreq IfreqMTU copy(ifreq.Name[:], ifname) - err := ioctl(fd, SIOCGIFMTU, uintptr(unsafe.Pointer(&ifreq))) + err := ioctlPtr(fd, SIOCGIFMTU, unsafe.Pointer(&ifreq)) return &ifreq, err } // IoctlSetIfreqMTU performs the SIOCSIFMTU ioctl operation on fd to set the MTU // of the network device specified by ifreq.Name. 
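// Illustrative sketch, not part of the vendored diff: the anyToSockaddr
// changes above replace the old "cast to a huge fixed-size array, then
// re-slice" idiom with unsafe.Slice (Go 1.17+), which builds a []byte of
// exactly n bytes from a *byte. rawPath and pathString below are hypothetical
// stand-ins for RawSockaddrUnix.Path and the conversion done in the diff.
package main

import (
	"fmt"
	"unsafe"
)

type rawPath struct {
	Path [108]int8 // a C char[] as it appears in raw sockaddr structs
}

func pathString(pp *rawPath, n int) string {
	// Old form: (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
	// New form: a slice header of length n over the same memory.
	return string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n))
}

func main() {
	var pp rawPath
	copy(pp.Path[:], []int8{'/', 't', 'm', 'p', '/', 's'})
	fmt.Println(pathString(&pp, 6)) // "/tmp/s"
}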
func IoctlSetIfreqMTU(fd int, ifreq *IfreqMTU) error { - err := ioctl(fd, SIOCSIFMTU, uintptr(unsafe.Pointer(ifreq))) - runtime.KeepAlive(ifreq) - return err + return ioctlPtr(fd, SIOCSIFMTU, unsafe.Pointer(ifreq)) } //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS_SYSCTL diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index b37310ce9b405..9fa879806bcbf 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -47,5 +47,6 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 //sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace +//sys ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) = SYS_ptrace //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 //sys Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index d51ec996304e7..f17b8c526a535 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -47,5 +47,6 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT //sys Lstat(path string, stat *Stat_t) (err error) //sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace +//sys ptrace1Ptr(request int, pid int, addr unsafe.Pointer, data uintptr) (err error) = SYS_ptrace //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, stat *Statfs_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index a41111a794e25..221efc26bcdc6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -172,6 +172,7 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { } //sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index d50b9dc250b72..5bdde03e4a847 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -161,7 +161,8 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { return } -//sys ioctl(fd int, req uint, arg uintptr) (err error) +//sys ioctl(fd int, req uint, arg uintptr) (err error) = SYS_IOCTL +//sys ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) = SYS_IOCTL //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL @@ -253,6 +254,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } //sys ptrace(request int, pid int, addr uintptr, data int) (err error) +//sys ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) = SYS_PTRACE func 
PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) @@ -267,19 +269,36 @@ func PtraceDetach(pid int) (err error) { } func PtraceGetFpRegs(pid int, fpregsout *FpReg) (err error) { - return ptrace(PT_GETFPREGS, pid, uintptr(unsafe.Pointer(fpregsout)), 0) + return ptracePtr(PT_GETFPREGS, pid, unsafe.Pointer(fpregsout), 0) } func PtraceGetRegs(pid int, regsout *Reg) (err error) { - return ptrace(PT_GETREGS, pid, uintptr(unsafe.Pointer(regsout)), 0) + return ptracePtr(PT_GETREGS, pid, unsafe.Pointer(regsout), 0) +} + +func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { + ioDesc := PtraceIoDesc{ + Op: int32(req), + Offs: offs, + } + if countin > 0 { + _ = out[:countin] // check bounds + ioDesc.Addr = &out[0] + } else if out != nil { + ioDesc.Addr = (*byte)(unsafe.Pointer(&_zero)) + } + ioDesc.SetLen(countin) + + err = ptracePtr(PT_IO, pid, unsafe.Pointer(&ioDesc), 0) + return int(ioDesc.Len), err } func PtraceLwpEvents(pid int, enable int) (err error) { return ptrace(PT_LWP_EVENTS, pid, 0, enable) } -func PtraceLwpInfo(pid int, info uintptr) (err error) { - return ptrace(PT_LWPINFO, pid, info, int(unsafe.Sizeof(PtraceLwpInfoStruct{}))) +func PtraceLwpInfo(pid int, info *PtraceLwpInfoStruct) (err error) { + return ptracePtr(PT_LWPINFO, pid, unsafe.Pointer(info), int(unsafe.Sizeof(*info))) } func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) { @@ -299,13 +318,25 @@ func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) { } func PtraceSetRegs(pid int, regs *Reg) (err error) { - return ptrace(PT_SETREGS, pid, uintptr(unsafe.Pointer(regs)), 0) + return ptracePtr(PT_SETREGS, pid, unsafe.Pointer(regs), 0) } func PtraceSingleStep(pid int) (err error) { return ptrace(PT_STEP, pid, 1, 0) } +func Dup3(oldfd, newfd, flags int) error { + if oldfd == newfd || flags&^O_CLOEXEC != 0 { + return EINVAL + } + how := F_DUP2FD + if flags&O_CLOEXEC != 0 { + how = F_DUP2FD_CLOEXEC + } + _, err := fcntl(oldfd, how, newfd) + return err +} + /* * Exposed directly */ diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index 6a91d471d09ca..b8da510043cb7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (d *PtraceIoDesc) SetLen(length int) { + d.Len = uint32(length) +} + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0) @@ -57,16 +61,5 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func PtraceGetFsBase(pid int, fsbase *int64) (err error) { - return ptrace(PT_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) -} - -func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{ - Op: int32(req), - Offs: offs, - Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. 
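// Illustrative sketch, not part of the vendored diff: the per-arch PtraceIO
// bodies being deleted here (the "TODO(#58351): this is not safe" lines)
// stored a Go pointer in a uintptr struct field, which the garbage collector
// does not treat as a reference, so the output buffer could be collected or
// moved while the kernel still used it. The consolidated PtraceIO above keeps
// the field typed as *byte instead. ioRequest and submit below are
// hypothetical; only the field-typing point is taken from the diff.
package main

type ioRequest struct {
	// Old, unsafe shape: Addr uintptr - invisible to the GC.
	// New shape: a real pointer field keeps the buffer reachable.
	Addr *byte
	Len  uint64
}

func submit(out []byte, countin int) ioRequest {
	req := ioRequest{Len: uint64(countin)}
	if countin > 0 {
		_ = out[:countin] // bounds check, as in the diff
		req.Addr = &out[0]
	} else if out != nil {
		// The vendored code points non-nil empty slices at a shared
		// zero-sized variable (_zero); a fresh zero byte stands in here.
		req.Addr = new(byte)
	}
	return req
}

func main() {
	buf := make([]byte, 16)
	_ = submit(buf, len(buf)) // req.Addr keeps buf alive while req is reachable
}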
- Len: uint32(countin), - } - err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err + return ptracePtr(PT_GETFSBASE, pid, unsafe.Pointer(fsbase), 0) } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index 48110a0abb924..47155c48390b5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (d *PtraceIoDesc) SetLen(length int) { + d.Len = uint64(length) +} + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0) @@ -57,16 +61,5 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func PtraceGetFsBase(pid int, fsbase *int64) (err error) { - return ptrace(PT_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) -} - -func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{ - Op: int32(req), - Offs: offs, - Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. - Len: uint64(countin), - } - err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err + return ptracePtr(PT_GETFSBASE, pid, unsafe.Pointer(fsbase), 0) } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index 52f1d4b75a350..08932093fa245 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (d *PtraceIoDesc) SetLen(length int) { + d.Len = uint32(length) +} + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0) @@ -55,14 +59,3 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) - -func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{ - Op: int32(req), - Offs: offs, - Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. 
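// Illustrative sketch, not part of the vendored diff: the Dup3 added to
// syscall_freebsd.go above emulates dup3(2) with fcntl(F_DUP2FD or
// F_DUP2FD_CLOEXEC), rejecting oldfd == newfd and any flag other than
// O_CLOEXEC, just as dup3 itself does; on Linux the same unix.Dup3 API is the
// native syscall. The log path below is illustrative only.

//go:build freebsd || linux

package main

import (
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Create("/tmp/dup3-demo.log")
	if err != nil {
		panic(err)
	}
	// Point fd 2 (stderr) at the log file in one step; O_CLOEXEC marks the
	// duplicate close-on-exec so it is not inherited by exec'd children.
	if err := unix.Dup3(int(f.Fd()), 2, unix.O_CLOEXEC); err != nil {
		panic(err)
	}
	os.Stderr.WriteString("now written to /tmp/dup3-demo.log\n")
}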
- Len: uint32(countin), - } - err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err -} diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go index 5537ee4f2ede6..d151a0d0e53ac 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go @@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (d *PtraceIoDesc) SetLen(length int) { + d.Len = uint64(length) +} + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0) @@ -55,14 +59,3 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) - -func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{ - Op: int32(req), - Offs: offs, - Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. - Len: uint64(countin), - } - err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err -} diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go index 164abd5d2152a..d5cd64b378742 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_riscv64.go @@ -42,6 +42,10 @@ func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } +func (d *PtraceIoDesc) SetLen(length int) { + d.Len = uint64(length) +} + func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0, 0) @@ -55,14 +59,3 @@ func sendfile(outfd int, infd int, offset *int64, count int) (written int, err e } func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) - -func PtraceIO(req int, pid int, offs uintptr, out []byte, countin int) (count int, err error) { - ioDesc := PtraceIoDesc{ - Op: int32(req), - Offs: offs, - Addr: uintptr(unsafe.Pointer(&out[0])), // TODO(#58351): this is not safe. 
- Len: uint64(countin), - } - err = ptrace(PT_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) - return int(ioDesc.Len), err -} diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go index 4ffb64808d75e..381fd4673bece 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -20,3 +20,11 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { } return } + +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(uintptr(arg))) + if r0 == -1 && er != nil { + err = er + } + return +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 5443dddd48d60..9735331530ac3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1015,8 +1015,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { for n < len(pp.Path) && pp.Path[n] != 0 { n++ } - bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] - sa.Name = string(bytes) + sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n)) return sa, nil case AF_INET: @@ -1365,6 +1364,10 @@ func SetsockoptTCPRepairOpt(fd, level, opt int, o []TCPRepairOpt) (err error) { return setsockopt(fd, level, opt, unsafe.Pointer(&o[0]), uintptr(SizeofTCPRepairOpt*len(o))) } +func SetsockoptTCPMD5Sig(fd, level, opt int, s *TCPMD5Sig) error { + return setsockopt(fd, level, opt, unsafe.Pointer(s), unsafe.Sizeof(*s)) +} + // Keyctl Commands (http://man7.org/linux/man-pages/man2/keyctl.2.html) // KeyctlInt calls keyctl commands in which each argument is an int. @@ -1579,6 +1582,7 @@ func BindToDevice(fd int, device string) (err error) { } //sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) +//sys ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) = SYS_PTRACE func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err error) { // The peek requests are machine-size oriented, so we wrap it @@ -1596,7 +1600,7 @@ func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err erro // boundary. n := 0 if addr%SizeofPtr != 0 { - err = ptrace(req, pid, addr-addr%SizeofPtr, uintptr(unsafe.Pointer(&buf[0]))) + err = ptracePtr(req, pid, addr-addr%SizeofPtr, unsafe.Pointer(&buf[0])) if err != nil { return 0, err } @@ -1608,7 +1612,7 @@ func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err erro for len(out) > 0 { // We use an internal buffer to guarantee alignment. // It's not documented if this is necessary, but we're paranoid. - err = ptrace(req, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0]))) + err = ptracePtr(req, pid, addr+uintptr(n), unsafe.Pointer(&buf[0])) if err != nil { return n, err } @@ -1640,7 +1644,7 @@ func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (c n := 0 if addr%SizeofPtr != 0 { var buf [SizeofPtr]byte - err = ptrace(peekReq, pid, addr-addr%SizeofPtr, uintptr(unsafe.Pointer(&buf[0]))) + err = ptracePtr(peekReq, pid, addr-addr%SizeofPtr, unsafe.Pointer(&buf[0])) if err != nil { return 0, err } @@ -1667,7 +1671,7 @@ func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (c // Trailing edge. 
if len(data) > 0 { var buf [SizeofPtr]byte - err = ptrace(peekReq, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0]))) + err = ptracePtr(peekReq, pid, addr+uintptr(n), unsafe.Pointer(&buf[0])) if err != nil { return n, err } @@ -1696,11 +1700,11 @@ func PtracePokeUser(pid int, addr uintptr, data []byte) (count int, err error) { } func PtraceGetRegs(pid int, regsout *PtraceRegs) (err error) { - return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) + return ptracePtr(PTRACE_GETREGS, pid, 0, unsafe.Pointer(regsout)) } func PtraceSetRegs(pid int, regs *PtraceRegs) (err error) { - return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) + return ptracePtr(PTRACE_SETREGS, pid, 0, unsafe.Pointer(regs)) } func PtraceSetOptions(pid int, options int) (err error) { @@ -1709,7 +1713,7 @@ func PtraceSetOptions(pid int, options int) (err error) { func PtraceGetEventMsg(pid int) (msg uint, err error) { var data _C_long - err = ptrace(PTRACE_GETEVENTMSG, pid, 0, uintptr(unsafe.Pointer(&data))) + err = ptracePtr(PTRACE_GETEVENTMSG, pid, 0, unsafe.Pointer(&data)) msg = uint(data) return } @@ -2154,6 +2158,14 @@ func isGroupMember(gid int) bool { return false } +func isCapDacOverrideSet() bool { + hdr := CapUserHeader{Version: LINUX_CAPABILITY_VERSION_3} + data := [2]CapUserData{} + err := Capget(&hdr, &data[0]) + + return err == nil && data[0].Effective&(1< 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index 77479d4581558..1129065624e58 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -388,6 +388,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -414,6 +424,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index 2e966d4d7a6c5..55f5abfe599c3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -388,6 +388,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen 
uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -414,6 +424,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index d65a7c0fa6e9a..d39651c2b586b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -388,6 +388,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -414,6 +424,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index 6f0b97c6db3a2..ddb7408680118 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -388,6 +388,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -414,6 +424,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go index 
e1c23b5272367..09a53a616c050 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_riscv64.go @@ -388,6 +388,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { @@ -414,6 +424,16 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr unsafe.Pointer, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 36ea3a55b72b7..430cb24de7e0e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -379,6 +379,16 @@ func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptracePtr(request int, pid int, addr uintptr, data unsafe.Pointer) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(arg) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 79f7389963ec9..8e1d9c8f66639 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -405,6 +405,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index fb161f3a26364..21c6950400e30 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -405,6 +405,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + 
return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 4c8ac993a8806..298168f90a17b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -405,6 +405,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 76dd8ec4fdb93..68b8bd492fec5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -405,6 +405,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { var _p0 unsafe.Pointer if len(mib) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index caeb807bd4e8d..0b0f910e1ab9c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index a05e5f4fff6d0..48ff5de75b55f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index b2da8e50cc7a8..2452a641dae7b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -527,6 +527,14 @@ func ioctl(fd int, req uint, 
arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 048b2655e6f80..5e35600a60c3c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index 6f33e37e723fe..b04cef1a19885 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index 330cf7f7ac66f..47a07ee0c2748 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index 5f24de0d9d76f..573378fdb96f0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -527,6 +527,14 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { return } +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(libc_ioctl_trampoline_addr, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + var libc_ioctl_trampoline_addr uintptr //go:cgo_import_dynamic libc_ioctl ioctl "libc.so" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 78d4a4240e9c3..4873a1e5d3e9c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -657,6 +657,17 @@ func ioctlRet(fd int, req uint, arg uintptr) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func 
ioctlPtrRet(fd int, req uint, arg unsafe.Pointer) (ret int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0) + ret = int(r0) + if e1 != 0 { + err = e1 + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpoll)), 3, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout), 0, 0, 0) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go index f2079457c6b24..07bfe2ef9ad07 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_zos_s390x.go @@ -267,6 +267,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ioctlPtr(fd int, req uint, arg unsafe.Pointer) (err error) { + _, _, e1 := syscall_syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index d9c78cdcbc45e..29dc483378aeb 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -362,7 +362,7 @@ type FpExtendedPrecision struct{} type PtraceIoDesc struct { Op int32 Offs uintptr - Addr uintptr + Addr *byte Len uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 26991b165596b..0a89b28906a67 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -367,7 +367,7 @@ type FpExtendedPrecision struct{} type PtraceIoDesc struct { Op int32 Offs uintptr - Addr uintptr + Addr *byte Len uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index f8324e7e7f495..c8666bb15288b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -350,7 +350,7 @@ type FpExtendedPrecision struct { type PtraceIoDesc struct { Op int32 Offs uintptr - Addr uintptr + Addr *byte Len uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 4220411f341ae..88fb48a887b10 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -347,7 +347,7 @@ type FpExtendedPrecision struct{} type PtraceIoDesc struct { Op int32 Offs uintptr - Addr uintptr + Addr *byte Len uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index 0660fd45c7c6f..698dc975e92be 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -348,7 +348,7 @@ type FpExtendedPrecision struct{} type PtraceIoDesc struct { Op int32 Offs uintptr - Addr uintptr + Addr *byte Len uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 
7d9fc8f1c91ae..ca84727cfe804 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -456,36 +456,60 @@ type Ucred struct { } type TCPInfo struct { - State uint8 - Ca_state uint8 - Retransmits uint8 - Probes uint8 - Backoff uint8 - Options uint8 - Rto uint32 - Ato uint32 - Snd_mss uint32 - Rcv_mss uint32 - Unacked uint32 - Sacked uint32 - Lost uint32 - Retrans uint32 - Fackets uint32 - Last_data_sent uint32 - Last_ack_sent uint32 - Last_data_recv uint32 - Last_ack_recv uint32 - Pmtu uint32 - Rcv_ssthresh uint32 - Rtt uint32 - Rttvar uint32 - Snd_ssthresh uint32 - Snd_cwnd uint32 - Advmss uint32 - Reordering uint32 - Rcv_rtt uint32 - Rcv_space uint32 - Total_retrans uint32 + State uint8 + Ca_state uint8 + Retransmits uint8 + Probes uint8 + Backoff uint8 + Options uint8 + Rto uint32 + Ato uint32 + Snd_mss uint32 + Rcv_mss uint32 + Unacked uint32 + Sacked uint32 + Lost uint32 + Retrans uint32 + Fackets uint32 + Last_data_sent uint32 + Last_ack_sent uint32 + Last_data_recv uint32 + Last_ack_recv uint32 + Pmtu uint32 + Rcv_ssthresh uint32 + Rtt uint32 + Rttvar uint32 + Snd_ssthresh uint32 + Snd_cwnd uint32 + Advmss uint32 + Reordering uint32 + Rcv_rtt uint32 + Rcv_space uint32 + Total_retrans uint32 + Pacing_rate uint64 + Max_pacing_rate uint64 + Bytes_acked uint64 + Bytes_received uint64 + Segs_out uint32 + Segs_in uint32 + Notsent_bytes uint32 + Min_rtt uint32 + Data_segs_in uint32 + Data_segs_out uint32 + Delivery_rate uint64 + Busy_time uint64 + Rwnd_limited uint64 + Sndbuf_limited uint64 + Delivered uint32 + Delivered_ce uint32 + Bytes_sent uint64 + Bytes_retrans uint64 + Dsack_dups uint32 + Reord_seen uint32 + Rcv_ooopack uint32 + Snd_wnd uint32 + Rcv_wnd uint32 + Rehash uint32 } type CanFilter struct { @@ -528,7 +552,7 @@ const ( SizeofIPv6MTUInfo = 0x20 SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc - SizeofTCPInfo = 0x68 + SizeofTCPInfo = 0xf0 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -1043,6 +1067,7 @@ const ( PerfBitCommExec = CBitFieldMaskBit24 PerfBitUseClockID = CBitFieldMaskBit25 PerfBitContextSwitch = CBitFieldMaskBit26 + PerfBitWriteBackward = CBitFieldMaskBit27 ) const ( @@ -1239,7 +1264,7 @@ type TCPMD5Sig struct { Flags uint8 Prefixlen uint8 Keylen uint16 - _ uint32 + Ifindex int32 Key [80]uint8 } @@ -1939,7 +1964,11 @@ const ( NFT_MSG_GETOBJ = 0x13 NFT_MSG_DELOBJ = 0x14 NFT_MSG_GETOBJ_RESET = 0x15 - NFT_MSG_MAX = 0x19 + NFT_MSG_NEWFLOWTABLE = 0x16 + NFT_MSG_GETFLOWTABLE = 0x17 + NFT_MSG_DELFLOWTABLE = 0x18 + NFT_MSG_GETRULE_RESET = 0x19 + NFT_MSG_MAX = 0x1a NFTA_LIST_UNSPEC = 0x0 NFTA_LIST_ELEM = 0x1 NFTA_HOOK_UNSPEC = 0x0 @@ -2443,9 +2472,11 @@ const ( SOF_TIMESTAMPING_OPT_STATS = 0x1000 SOF_TIMESTAMPING_OPT_PKTINFO = 0x2000 SOF_TIMESTAMPING_OPT_TX_SWHW = 0x4000 + SOF_TIMESTAMPING_BIND_PHC = 0x8000 + SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000 - SOF_TIMESTAMPING_LAST = 0x8000 - SOF_TIMESTAMPING_MASK = 0xffff + SOF_TIMESTAMPING_LAST = 0x10000 + SOF_TIMESTAMPING_MASK = 0x1ffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -3265,7 +3296,7 @@ const ( DEVLINK_ATTR_LINECARD_SUPPORTED_TYPES = 0xae DEVLINK_ATTR_NESTED_DEVLINK = 0xaf DEVLINK_ATTR_SELFTESTS = 0xb0 - DEVLINK_ATTR_MAX = 0xb0 + DEVLINK_ATTR_MAX = 0xb3 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0 DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1 DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0 @@ -3281,7 +3312,8 @@ const ( DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR = 0x1 DEVLINK_PORT_FN_ATTR_STATE = 0x2 DEVLINK_PORT_FN_ATTR_OPSTATE = 0x3 - DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x3 + 
DEVLINK_PORT_FN_ATTR_CAPS = 0x4 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x4 ) type FsverityDigest struct { @@ -3572,7 +3604,8 @@ const ( ETHTOOL_MSG_MODULE_SET = 0x23 ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 - ETHTOOL_MSG_USER_MAX = 0x25 + ETHTOOL_MSG_RSS_GET = 0x26 + ETHTOOL_MSG_USER_MAX = 0x26 ETHTOOL_MSG_KERNEL_NONE = 0x0 ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3611,7 +3644,8 @@ const ( ETHTOOL_MSG_MODULE_GET_REPLY = 0x23 ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 - ETHTOOL_MSG_KERNEL_MAX = 0x25 + ETHTOOL_MSG_RSS_GET_REPLY = 0x26 + ETHTOOL_MSG_KERNEL_MAX = 0x26 ETHTOOL_A_HEADER_UNSPEC = 0x0 ETHTOOL_A_HEADER_DEV_INDEX = 0x1 ETHTOOL_A_HEADER_DEV_NAME = 0x2 @@ -3679,7 +3713,8 @@ const ( ETHTOOL_A_LINKSTATE_SQI_MAX = 0x4 ETHTOOL_A_LINKSTATE_EXT_STATE = 0x5 ETHTOOL_A_LINKSTATE_EXT_SUBSTATE = 0x6 - ETHTOOL_A_LINKSTATE_MAX = 0x6 + ETHTOOL_A_LINKSTATE_EXT_DOWN_CNT = 0x7 + ETHTOOL_A_LINKSTATE_MAX = 0x7 ETHTOOL_A_DEBUG_UNSPEC = 0x0 ETHTOOL_A_DEBUG_HEADER = 0x1 ETHTOOL_A_DEBUG_MSGMASK = 0x2 @@ -4409,7 +4444,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x140 + NL80211_ATTR_MAX = 0x141 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -4552,6 +4587,7 @@ const ( NL80211_ATTR_SUPPORT_MESH_AUTH = 0x73 NL80211_ATTR_SURVEY_INFO = 0x54 NL80211_ATTR_SURVEY_RADIO_STATS = 0xda + NL80211_ATTR_TD_BITMAP = 0x141 NL80211_ATTR_TDLS_ACTION = 0x88 NL80211_ATTR_TDLS_DIALOG_TOKEN = 0x89 NL80211_ATTR_TDLS_EXTERNAL_SETUP = 0x8c @@ -5752,3 +5788,25 @@ const ( AUDIT_NLGRP_NONE = 0x0 AUDIT_NLGRP_READLOG = 0x1 ) + +const ( + TUN_F_CSUM = 0x1 + TUN_F_TSO4 = 0x2 + TUN_F_TSO6 = 0x4 + TUN_F_TSO_ECN = 0x8 + TUN_F_UFO = 0x10 +) + +const ( + VIRTIO_NET_HDR_F_NEEDS_CSUM = 0x1 + VIRTIO_NET_HDR_F_DATA_VALID = 0x2 + VIRTIO_NET_HDR_F_RSC_INFO = 0x4 +) + +const ( + VIRTIO_NET_HDR_GSO_NONE = 0x0 + VIRTIO_NET_HDR_GSO_TCPV4 = 0x1 + VIRTIO_NET_HDR_GSO_UDP = 0x3 + VIRTIO_NET_HDR_GSO_TCPV6 = 0x4 + VIRTIO_NET_HDR_GSO_ECN = 0x80 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 89c516a29acfd..4ecc1495cd0af 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -414,7 +414,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [122]int8 + Data [122]byte _ uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 62b4fb269963b..34fddff964e9b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -427,7 +427,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index e86b35893ece5..3b14a6031f3f5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -405,7 +405,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [122]uint8 + Data [122]byte _ uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 6c6be4c911d8f..0517651ab3f9d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ 
-406,7 +406,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index 4982ea355a286..3b0c518134525 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -407,7 +407,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 173141a670324..fccdf4dd0f460 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -410,7 +410,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [122]int8 + Data [122]byte _ uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 93ae4c51673dc..500de8fc07db4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -409,7 +409,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 4e4e510ca5198..d0434cd2c6dba 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -409,7 +409,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 3f5ba013d9953..84206ba5347af 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -410,7 +410,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [122]int8 + Data [122]byte _ uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index 71dfe7cdb47a4..ab078cf1f51de 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -417,7 +417,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [122]uint8 + Data [122]byte _ uint32 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 3a2b7f0a666e6..42eb2c4cefd61 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -416,7 +416,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]uint8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index a52d62756328b..31304a4e8bb51 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -416,7 +416,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]uint8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index dfc007d8a6912..c311f9612d885 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -434,7 +434,7 @@ const ( type 
SockaddrStorage struct { Family uint16 - _ [118]uint8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index b53cb9103d307..bba3cefac1dd8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -429,7 +429,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index fe0aa3547280f..ad8a01380461c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -411,7 +411,7 @@ const ( type SockaddrStorage struct { Family uint16 - _ [118]int8 + Data [118]byte _ uint64 } diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 41cb3c01fd957..3723b2c224c85 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -824,6 +824,9 @@ const socket_error = uintptr(^uint32(0)) //sys WSAStartup(verreq uint32, data *WSAData) (sockerr error) = ws2_32.WSAStartup //sys WSACleanup() (err error) [failretval==socket_error] = ws2_32.WSACleanup //sys WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) [failretval==socket_error] = ws2_32.WSAIoctl +//sys WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) [failretval==socket_error] = ws2_32.WSALookupServiceBeginW +//sys WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) [failretval==socket_error] = ws2_32.WSALookupServiceNextW +//sys WSALookupServiceEnd(handle Handle) (err error) [failretval==socket_error] = ws2_32.WSALookupServiceEnd //sys socket(af int32, typ int32, protocol int32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.socket //sys sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (err error) [failretval==socket_error] = ws2_32.sendto //sys recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen *int32) (n int32, err error) [failretval==-1] = ws2_32.recvfrom @@ -1019,8 +1022,7 @@ func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) { for n < len(pp.Path) && pp.Path[n] != 0 { n++ } - bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] - sa.Name = string(bytes) + sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n)) return sa, nil case AF_INET: diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 0c4add974106c..857acf1032d9f 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1243,6 +1243,51 @@ const ( DnsSectionAdditional = 0x0003 ) +const ( + // flags of WSALookupService + LUP_DEEP = 0x0001 + LUP_CONTAINERS = 0x0002 + LUP_NOCONTAINERS = 0x0004 + LUP_NEAREST = 0x0008 + LUP_RETURN_NAME = 0x0010 + LUP_RETURN_TYPE = 0x0020 + LUP_RETURN_VERSION = 0x0040 + LUP_RETURN_COMMENT = 0x0080 + LUP_RETURN_ADDR = 0x0100 + LUP_RETURN_BLOB = 0x0200 + LUP_RETURN_ALIASES = 0x0400 + LUP_RETURN_QUERY_STRING = 0x0800 + LUP_RETURN_ALL = 0x0FF0 + LUP_RES_SERVICE = 0x8000 + + LUP_FLUSHCACHE = 0x1000 + LUP_FLUSHPREVIOUS = 0x2000 + + 
LUP_NON_AUTHORITATIVE = 0x4000 + LUP_SECURE = 0x8000 + LUP_RETURN_PREFERRED_NAMES = 0x10000 + LUP_DNS_ONLY = 0x20000 + + LUP_ADDRCONFIG = 0x100000 + LUP_DUAL_ADDR = 0x200000 + LUP_FILESERVER = 0x400000 + LUP_DISABLE_IDN_ENCODING = 0x00800000 + LUP_API_ANSI = 0x01000000 + + LUP_RESOLUTION_HANDLE = 0x80000000 +) + +const ( + // values of WSAQUERYSET's namespace + NS_ALL = 0 + NS_DNS = 12 + NS_NLA = 15 + NS_BTH = 16 + NS_EMAIL = 37 + NS_PNRPNAME = 38 + NS_PNRPCLOUD = 39 +) + type DNSSRVData struct { Target *uint16 Priority uint16 @@ -3258,3 +3303,43 @@ const ( DWMWA_TEXT_COLOR = 36 DWMWA_VISIBLE_FRAME_BORDER_THICKNESS = 37 ) + +type WSAQUERYSET struct { + Size uint32 + ServiceInstanceName *uint16 + ServiceClassId *GUID + Version *WSAVersion + Comment *uint16 + NameSpace uint32 + NSProviderId *GUID + Context *uint16 + NumberOfProtocols uint32 + AfpProtocols *AFProtocols + QueryString *uint16 + NumberOfCsAddrs uint32 + SaBuffer *CSAddrInfo + OutputFlags uint32 + Blob *BLOB +} + +type WSAVersion struct { + Version uint32 + EnumerationOfComparison int32 +} + +type AFProtocols struct { + AddressFamily int32 + Protocol int32 +} + +type CSAddrInfo struct { + LocalAddr SocketAddress + RemoteAddr SocketAddress + SocketType int32 + Protocol int32 +} + +type BLOB struct { + Size uint32 + BlobData *byte +} diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index ac60052e44a79..6d2a268534d79 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -474,6 +474,9 @@ var ( procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW") procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") procWSAIoctl = modws2_32.NewProc("WSAIoctl") + procWSALookupServiceBeginW = modws2_32.NewProc("WSALookupServiceBeginW") + procWSALookupServiceEnd = modws2_32.NewProc("WSALookupServiceEnd") + procWSALookupServiceNextW = modws2_32.NewProc("WSALookupServiceNextW") procWSARecv = modws2_32.NewProc("WSARecv") procWSARecvFrom = modws2_32.NewProc("WSARecvFrom") procWSASend = modws2_32.NewProc("WSASend") @@ -4067,6 +4070,30 @@ func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbo return } +func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) { + r1, _, e1 := syscall.Syscall(procWSALookupServiceBeginW.Addr(), 3, uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle))) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func WSALookupServiceEnd(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procWSALookupServiceEnd.Addr(), 1, uintptr(handle), 0, 0) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + +func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) { + r1, _, e1 := syscall.Syscall6(procWSALookupServiceNextW.Addr(), 4, uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet)), 0, 0) + if r1 == socket_error { + err = errnoErr(e1) + } + return +} + func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) { r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) if r1 == socket_error { diff 
--git a/vendor/golang.org/x/tools/LICENSE b/vendor/golang.org/x/tools/LICENSE new file mode 100644 index 0000000000000..6a66aea5eafe0 --- /dev/null +++ b/vendor/golang.org/x/tools/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/tools/PATENTS b/vendor/golang.org/x/tools/PATENTS new file mode 100644 index 0000000000000..733099041f84f --- /dev/null +++ b/vendor/golang.org/x/tools/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/tools/cmd/stringer/stringer.go b/vendor/golang.org/x/tools/cmd/stringer/stringer.go new file mode 100644 index 0000000000000..998d1a51bfd01 --- /dev/null +++ b/vendor/golang.org/x/tools/cmd/stringer/stringer.go @@ -0,0 +1,657 @@ +// Copyright 2014 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Stringer is a tool to automate the creation of methods that satisfy the fmt.Stringer +// interface. Given the name of a (signed or unsigned) integer type T that has constants +// defined, stringer will create a new self-contained Go source file implementing +// +// func (t T) String() string +// +// The file is created in the same package and directory as the package that defines T. +// It has helpful defaults designed for use with go generate. +// +// Stringer works best with constants that are consecutive values such as created using iota, +// but creates good code regardless. In the future it might also provide custom support for +// constant sets that are bit patterns. +// +// For example, given this snippet, +// +// package painkiller +// +// type Pill int +// +// const ( +// Placebo Pill = iota +// Aspirin +// Ibuprofen +// Paracetamol +// Acetaminophen = Paracetamol +// ) +// +// running this command +// +// stringer -type=Pill +// +// in the same directory will create the file pill_string.go, in package painkiller, +// containing a definition of +// +// func (Pill) String() string +// +// That method will translate the value of a Pill constant to the string representation +// of the respective constant name, so that the call fmt.Print(painkiller.Aspirin) will +// print the string "Aspirin". +// +// Typically this process would be run using go generate, like this: +// +// //go:generate stringer -type=Pill +// +// If multiple constants have the same value, the lexically first matching name will +// be used (in the example, Acetaminophen will print as "Paracetamol"). +// +// With no arguments, it processes the package in the current directory. +// Otherwise, the arguments must name a single directory holding a Go package +// or a set of Go source files that represent a single Go package. +// +// The -type flag accepts a comma-separated list of types so a single run can +// generate methods for multiple types. The default output file is t_string.go, +// where t is the lower-cased name of the first type listed. It can be overridden +// with the -output flag. +// +// The -linecomment flag tells stringer to generate the text of any line comment, trimmed +// of leading spaces, instead of the constant name. For instance, if the constants above had a +// Pill prefix, one could write +// +// PillAspirin // Aspirin +// +// to suppress it in the output. +package main // import "golang.org/x/tools/cmd/stringer" + +import ( + "bytes" + "flag" + "fmt" + "go/ast" + "go/constant" + "go/format" + "go/token" + "go/types" + "log" + "os" + "path/filepath" + "sort" + "strings" + + "golang.org/x/tools/go/packages" +) + +var ( + typeNames = flag.String("type", "", "comma-separated list of type names; must be set") + output = flag.String("output", "", "output file name; default srcdir/<type>_string.go") + trimprefix = flag.String("trimprefix", "", "trim the `prefix` from the generated constant names") + linecomment = flag.Bool("linecomment", false, "use line comment text as printed text when present") + buildTags = flag.String("tags", "", "comma-separated list of build tags to apply") +) + +// Usage is a replacement usage function for the flags package. +func Usage() { + fmt.Fprintf(os.Stderr, "Usage of stringer:\n") + fmt.Fprintf(os.Stderr, "\tstringer [flags] -type T [directory]\n") + fmt.Fprintf(os.Stderr, "\tstringer [flags] -type T files...
# Must be a single package\n") + fmt.Fprintf(os.Stderr, "For more information, see:\n") + fmt.Fprintf(os.Stderr, "\thttps://pkg.go.dev/golang.org/x/tools/cmd/stringer\n") + fmt.Fprintf(os.Stderr, "Flags:\n") + flag.PrintDefaults() +} + +func main() { + log.SetFlags(0) + log.SetPrefix("stringer: ") + flag.Usage = Usage + flag.Parse() + if len(*typeNames) == 0 { + flag.Usage() + os.Exit(2) + } + types := strings.Split(*typeNames, ",") + var tags []string + if len(*buildTags) > 0 { + tags = strings.Split(*buildTags, ",") + } + + // We accept either one directory or a list of files. Which do we have? + args := flag.Args() + if len(args) == 0 { + // Default: process whole package in current directory. + args = []string{"."} + } + + // Parse the package once. + var dir string + g := Generator{ + trimPrefix: *trimprefix, + lineComment: *linecomment, + } + // TODO(suzmue): accept other patterns for packages (directories, list of files, import paths, etc). + if len(args) == 1 && isDirectory(args[0]) { + dir = args[0] + } else { + if len(tags) != 0 { + log.Fatal("-tags option applies only to directories, not when files are specified") + } + dir = filepath.Dir(args[0]) + } + + g.parsePackage(args, tags) + + // Print the header and package clause. + g.Printf("// Code generated by \"stringer %s\"; DO NOT EDIT.\n", strings.Join(os.Args[1:], " ")) + g.Printf("\n") + g.Printf("package %s", g.pkg.name) + g.Printf("\n") + g.Printf("import \"strconv\"\n") // Used by all methods. + + // Run generate for each type. + for _, typeName := range types { + g.generate(typeName) + } + + // Format the output. + src := g.format() + + // Write to file. + outputName := *output + if outputName == "" { + baseName := fmt.Sprintf("%s_string.go", types[0]) + outputName = filepath.Join(dir, strings.ToLower(baseName)) + } + err := os.WriteFile(outputName, src, 0644) + if err != nil { + log.Fatalf("writing output: %s", err) + } +} + +// isDirectory reports whether the named file is a directory. +func isDirectory(name string) bool { + info, err := os.Stat(name) + if err != nil { + log.Fatal(err) + } + return info.IsDir() +} + +// Generator holds the state of the analysis. Primarily used to buffer +// the output for format.Source. +type Generator struct { + buf bytes.Buffer // Accumulated output. + pkg *Package // Package we are scanning. + + trimPrefix string + lineComment bool +} + +func (g *Generator) Printf(format string, args ...interface{}) { + fmt.Fprintf(&g.buf, format, args...) +} + +// File holds a single parsed file and associated data. +type File struct { + pkg *Package // Package to which this file belongs. + file *ast.File // Parsed AST. + // These fields are reset for each type being generated. + typeName string // Name of the constant type. + values []Value // Accumulator for constant values of that type. + + trimPrefix string + lineComment bool +} + +type Package struct { + name string + defs map[*ast.Ident]types.Object + files []*File +} + +// parsePackage analyzes the single package constructed from the patterns and tags. +// parsePackage exits if there is an error. +func (g *Generator) parsePackage(patterns []string, tags []string) { + cfg := &packages.Config{ + Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax, + // TODO: Need to think about constants in test files. Maybe write type_string_test.go + // in a separate pass? For later. 
+ Tests: false, + BuildFlags: []string{fmt.Sprintf("-tags=%s", strings.Join(tags, " "))}, + } + pkgs, err := packages.Load(cfg, patterns...) + if err != nil { + log.Fatal(err) + } + if len(pkgs) != 1 { + log.Fatalf("error: %d packages found", len(pkgs)) + } + g.addPackage(pkgs[0]) +} + +// addPackage adds a type checked Package and its syntax files to the generator. +func (g *Generator) addPackage(pkg *packages.Package) { + g.pkg = &Package{ + name: pkg.Name, + defs: pkg.TypesInfo.Defs, + files: make([]*File, len(pkg.Syntax)), + } + + for i, file := range pkg.Syntax { + g.pkg.files[i] = &File{ + file: file, + pkg: g.pkg, + trimPrefix: g.trimPrefix, + lineComment: g.lineComment, + } + } +} + +// generate produces the String method for the named type. +func (g *Generator) generate(typeName string) { + values := make([]Value, 0, 100) + for _, file := range g.pkg.files { + // Set the state for this run of the walker. + file.typeName = typeName + file.values = nil + if file.file != nil { + ast.Inspect(file.file, file.genDecl) + values = append(values, file.values...) + } + } + + if len(values) == 0 { + log.Fatalf("no values defined for type %s", typeName) + } + // Generate code that will fail if the constants change value. + g.Printf("func _() {\n") + g.Printf("\t// An \"invalid array index\" compiler error signifies that the constant values have changed.\n") + g.Printf("\t// Re-run the stringer command to generate them again.\n") + g.Printf("\tvar x [1]struct{}\n") + for _, v := range values { + g.Printf("\t_ = x[%s - %s]\n", v.originalName, v.str) + } + g.Printf("}\n") + runs := splitIntoRuns(values) + // The decision of which pattern to use depends on the number of + // runs in the numbers. If there's only one, it's easy. For more than + // one, there's a tradeoff between complexity and size of the data + // and code vs. the simplicity of a map. A map takes more space, + // but so does the code. The decision here (crossover at 10) is + // arbitrary, but considers that for large numbers of runs the cost + // of the linear scan in the switch might become important, and + // rather than use yet another algorithm such as binary search, + // we punt and use a map. In any case, the likelihood of a map + // being necessary for any realistic example other than bitmasks + // is very low. And bitmasks probably deserve their own analysis, + // to be done some other day. + switch { + case len(runs) == 1: + g.buildOneRun(runs, typeName) + case len(runs) <= 10: + g.buildMultipleRuns(runs, typeName) + default: + g.buildMap(runs, typeName) + } +} + +// splitIntoRuns breaks the values into runs of contiguous sequences. +// For example, given 1,2,3,5,6,7 it returns {1,2,3},{5,6,7}. +// The input slice is known to be non-empty. +func splitIntoRuns(values []Value) [][]Value { + // We use stable sort so the lexically first name is chosen for equal elements. + sort.Stable(byValue(values)) + // Remove duplicates. Stable sort has put the one we want to print first, + // so use that one. The String method won't care about which named constant + // was the argument, so the first name for the given value is the only one to keep. + // We need to do this because identical values would cause the switch or map + // to fail to compile. + j := 1 + for i := 1; i < len(values); i++ { + if values[i].value != values[i-1].value { + values[j] = values[i] + j++ + } + } + values = values[:j] + runs := make([][]Value, 0, 10) + for len(values) > 0 { + // One contiguous sequence per outer loop. 
+ i := 1 + for i < len(values) && values[i].value == values[i-1].value+1 { + i++ + } + runs = append(runs, values[:i]) + values = values[i:] + } + return runs +} + +// format returns the gofmt-ed contents of the Generator's buffer. +func (g *Generator) format() []byte { + src, err := format.Source(g.buf.Bytes()) + if err != nil { + // Should never happen, but can arise when developing this code. + // The user can compile the output to see the error. + log.Printf("warning: internal error: invalid Go generated: %s", err) + log.Printf("warning: compile the package to analyze the error") + return g.buf.Bytes() + } + return src +} + +// Value represents a declared constant. +type Value struct { + originalName string // The name of the constant. + name string // The name with trimmed prefix. + // The value is stored as a bit pattern alone. The boolean tells us + // whether to interpret it as an int64 or a uint64; the only place + // this matters is when sorting. + // Much of the time the str field is all we need; it is printed + // by Value.String. + value uint64 // Will be converted to int64 when needed. + signed bool // Whether the constant is a signed type. + str string // The string representation given by the "go/constant" package. +} + +func (v *Value) String() string { + return v.str +} + +// byValue lets us sort the constants into increasing order. +// We take care in the Less method to sort in signed or unsigned order, +// as appropriate. +type byValue []Value + +func (b byValue) Len() int { return len(b) } +func (b byValue) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byValue) Less(i, j int) bool { + if b[i].signed { + return int64(b[i].value) < int64(b[j].value) + } + return b[i].value < b[j].value +} + +// genDecl processes one declaration clause. +func (f *File) genDecl(node ast.Node) bool { + decl, ok := node.(*ast.GenDecl) + if !ok || decl.Tok != token.CONST { + // We only care about const declarations. + return true + } + // The name of the type of the constants we are declaring. + // Can change if this is a multi-element declaration. + typ := "" + // Loop over the elements of the declaration. Each element is a ValueSpec: + // a list of names possibly followed by a type, possibly followed by values. + // If the type and value are both missing, we carry down the type (and value, + // but the "go/types" package takes care of that). + for _, spec := range decl.Specs { + vspec := spec.(*ast.ValueSpec) // Guaranteed to succeed as this is CONST. + if vspec.Type == nil && len(vspec.Values) > 0 { + // "X = 1". With no type but a value. If the constant is untyped, + // skip this vspec and reset the remembered type. + typ = "" + + // If this is a simple type conversion, remember the type. + // We don't mind if this is actually a call; a qualified call won't + // be matched (that will be SelectorExpr, not Ident), and only unusual + // situations will result in a function call that appears to be + // a type conversion. + ce, ok := vspec.Values[0].(*ast.CallExpr) + if !ok { + continue + } + id, ok := ce.Fun.(*ast.Ident) + if !ok { + continue + } + typ = id.Name + } + if vspec.Type != nil { + // "X T". We have a type. Remember it. + ident, ok := vspec.Type.(*ast.Ident) + if !ok { + continue + } + typ = ident.Name + } + if typ != f.typeName { + // This is not the type we're looking for. + continue + } + // We now have a list of names (from one line of source code) all being + // declared with the desired type. + // Grab their names and actual values and store them in f.values. 
+ for _, name := range vspec.Names { + if name.Name == "_" { + continue + } + // This dance lets the type checker find the values for us. It's a + // bit tricky: look up the object declared by the name, find its + // types.Const, and extract its value. + obj, ok := f.pkg.defs[name] + if !ok { + log.Fatalf("no value for constant %s", name) + } + info := obj.Type().Underlying().(*types.Basic).Info() + if info&types.IsInteger == 0 { + log.Fatalf("can't handle non-integer constant type %s", typ) + } + value := obj.(*types.Const).Val() // Guaranteed to succeed as this is CONST. + if value.Kind() != constant.Int { + log.Fatalf("can't happen: constant is not an integer %s", name) + } + i64, isInt := constant.Int64Val(value) + u64, isUint := constant.Uint64Val(value) + if !isInt && !isUint { + log.Fatalf("internal error: value of %s is not an integer: %s", name, value.String()) + } + if !isInt { + u64 = uint64(i64) + } + v := Value{ + originalName: name.Name, + value: u64, + signed: info&types.IsUnsigned == 0, + str: value.String(), + } + if c := vspec.Comment; f.lineComment && c != nil && len(c.List) == 1 { + v.name = strings.TrimSpace(c.Text()) + } else { + v.name = strings.TrimPrefix(v.originalName, f.trimPrefix) + } + f.values = append(f.values, v) + } + } + return false +} + +// Helpers + +// usize returns the number of bits of the smallest unsigned integer +// type that will hold n. Used to create the smallest possible slice of +// integers to use as indexes into the concatenated strings. +func usize(n int) int { + switch { + case n < 1<<8: + return 8 + case n < 1<<16: + return 16 + default: + // 2^32 is enough constants for anyone. + return 32 + } +} + +// declareIndexAndNameVars declares the index slices and concatenated names +// strings representing the runs of values. +func (g *Generator) declareIndexAndNameVars(runs [][]Value, typeName string) { + var indexes, names []string + for i, run := range runs { + index, name := g.createIndexAndNameDecl(run, typeName, fmt.Sprintf("_%d", i)) + if len(run) != 1 { + indexes = append(indexes, index) + } + names = append(names, name) + } + g.Printf("const (\n") + for _, name := range names { + g.Printf("\t%s\n", name) + } + g.Printf(")\n\n") + + if len(indexes) > 0 { + g.Printf("var (") + for _, index := range indexes { + g.Printf("\t%s\n", index) + } + g.Printf(")\n\n") + } +} + +// declareIndexAndNameVar is the single-run version of declareIndexAndNameVars +func (g *Generator) declareIndexAndNameVar(run []Value, typeName string) { + index, name := g.createIndexAndNameDecl(run, typeName, "") + g.Printf("const %s\n", name) + g.Printf("var %s\n", index) +} + +// createIndexAndNameDecl returns the pair of declarations for the run. The caller will add "const" and "var". +func (g *Generator) createIndexAndNameDecl(run []Value, typeName string, suffix string) (string, string) { + b := new(bytes.Buffer) + indexes := make([]int, len(run)) + for i := range run { + b.WriteString(run[i].name) + indexes[i] = b.Len() + } + nameConst := fmt.Sprintf("_%s_name%s = %q", typeName, suffix, b.String()) + nameLen := b.Len() + b.Reset() + fmt.Fprintf(b, "_%s_index%s = [...]uint%d{0, ", typeName, suffix, usize(nameLen)) + for i, v := range indexes { + if i > 0 { + fmt.Fprintf(b, ", ") + } + fmt.Fprintf(b, "%d", v) + } + fmt.Fprintf(b, "}") + return b.String(), nameConst +} + +// declareNameVars declares the concatenated names string representing all the values in the runs. 
+func (g *Generator) declareNameVars(runs [][]Value, typeName string, suffix string) { + g.Printf("const _%s_name%s = \"", typeName, suffix) + for _, run := range runs { + for i := range run { + g.Printf("%s", run[i].name) + } + } + g.Printf("\"\n") +} + +// buildOneRun generates the variables and String method for a single run of contiguous values. +func (g *Generator) buildOneRun(runs [][]Value, typeName string) { + values := runs[0] + g.Printf("\n") + g.declareIndexAndNameVar(values, typeName) + // The generated code is simple enough to write as a Printf format. + lessThanZero := "" + if values[0].signed { + lessThanZero = "i < 0 || " + } + if values[0].value == 0 { // Signed or unsigned, 0 is still 0. + g.Printf(stringOneRun, typeName, usize(len(values)), lessThanZero) + } else { + g.Printf(stringOneRunWithOffset, typeName, values[0].String(), usize(len(values)), lessThanZero) + } +} + +// Arguments to format are: +// +// [1]: type name +// [2]: size of index element (8 for uint8 etc.) +// [3]: less than zero check (for signed types) +const stringOneRun = `func (i %[1]s) String() string { + if %[3]si >= %[1]s(len(_%[1]s_index)-1) { + return "%[1]s(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _%[1]s_name[_%[1]s_index[i]:_%[1]s_index[i+1]] +} +` + +// Arguments to format are: +// [1]: type name +// [2]: lowest defined value for type, as a string +// [3]: size of index element (8 for uint8 etc.) +// [4]: less than zero check (for signed types) +/* + */ +const stringOneRunWithOffset = `func (i %[1]s) String() string { + i -= %[2]s + if %[4]si >= %[1]s(len(_%[1]s_index)-1) { + return "%[1]s(" + strconv.FormatInt(int64(i + %[2]s), 10) + ")" + } + return _%[1]s_name[_%[1]s_index[i] : _%[1]s_index[i+1]] +} +` + +// buildMultipleRuns generates the variables and String method for multiple runs of contiguous values. +// For this pattern, a single Printf format won't do. +func (g *Generator) buildMultipleRuns(runs [][]Value, typeName string) { + g.Printf("\n") + g.declareIndexAndNameVars(runs, typeName) + g.Printf("func (i %s) String() string {\n", typeName) + g.Printf("\tswitch {\n") + for i, values := range runs { + if len(values) == 1 { + g.Printf("\tcase i == %s:\n", &values[0]) + g.Printf("\t\treturn _%s_name_%d\n", typeName, i) + continue + } + if values[0].value == 0 && !values[0].signed { + // For an unsigned lower bound of 0, "0 <= i" would be redundant. + g.Printf("\tcase i <= %s:\n", &values[len(values)-1]) + } else { + g.Printf("\tcase %s <= i && i <= %s:\n", &values[0], &values[len(values)-1]) + } + if values[0].value != 0 { + g.Printf("\t\ti -= %s\n", &values[0]) + } + g.Printf("\t\treturn _%s_name_%d[_%s_index_%d[i]:_%s_index_%d[i+1]]\n", + typeName, i, typeName, i, typeName, i) + } + g.Printf("\tdefault:\n") + g.Printf("\t\treturn \"%s(\" + strconv.FormatInt(int64(i), 10) + \")\"\n", typeName) + g.Printf("\t}\n") + g.Printf("}\n") +} + +// buildMap handles the case where the space is so sparse a map is a reasonable fallback. +// It's a rare situation but has simple code. +func (g *Generator) buildMap(runs [][]Value, typeName string) { + g.Printf("\n") + g.declareNameVars(runs, typeName, "") + g.Printf("\nvar _%s_map = map[%s]string{\n", typeName, typeName) + n := 0 + for _, values := range runs { + for _, value := range values { + g.Printf("\t%s: _%s_name[%d:%d],\n", &value, typeName, n, n+len(value.name)) + n += len(value.name) + } + } + g.Printf("}\n\n") + g.Printf(stringMap, typeName) +} + +// Argument to format is the type name. 
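For orientation, here is roughly what the single-run template above expands to for the Pill example in the package comment (an editor's sketch, not part of the vendored file; the package clause, strconv import, and the compile-time index check emitted by generate are omitted). The duplicate Acetaminophen value collapses onto Paracetamol, as splitIntoRuns describes:

const _Pill_name = "PlaceboAspirinIbuprofenParacetamol"

var _Pill_index = [...]uint8{0, 7, 14, 23, 34}

func (i Pill) String() string {
	// Out-of-range values fall back to a numeric representation.
	if i < 0 || i >= Pill(len(_Pill_index)-1) {
		return "Pill(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _Pill_name[_Pill_index[i]:_Pill_index[i+1]]
}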
+const stringMap = `func (i %[1]s) String() string { + if str, ok := _%[1]s_map[i]; ok { + return str + } + return "%[1]s(" + strconv.FormatInt(int64(i), 10) + ")" +} +` diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go new file mode 100644 index 0000000000000..165ede0f8f38d --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -0,0 +1,187 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gcexportdata provides functions for locating, reading, and +// writing export data files containing type information produced by the +// gc compiler. This package supports go1.7 export data format and all +// later versions. +// +// Although it might seem convenient for this package to live alongside +// go/types in the standard library, this would cause version skew +// problems for developer tools that use it, since they must be able to +// consume the outputs of the gc compiler both before and after a Go +// update such as from Go 1.7 to Go 1.8. Because this package lives in +// golang.org/x/tools, sites can update their version of this repo some +// time before the Go 1.8 release and rebuild and redeploy their +// developer tools, which will then be able to consume both Go 1.7 and +// Go 1.8 export data files, so they will work before and after the +// Go update. (See discussion at https://golang.org/issue/15651.) +package gcexportdata // import "golang.org/x/tools/go/gcexportdata" + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "go/token" + "go/types" + "io" + "os/exec" + + "golang.org/x/tools/internal/gcimporter" +) + +// Find returns the name of an object (.o) or archive (.a) file +// containing type information for the specified import path, +// using the go command. +// If no file was found, an empty filename is returned. +// +// A relative srcDir is interpreted relative to the current working directory. +// +// Find also returns the package's resolved (canonical) import path, +// reflecting the effects of srcDir and vendoring on importPath. +// +// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages, +// which is more efficient. +func Find(importPath, srcDir string) (filename, path string) { + cmd := exec.Command("go", "list", "-json", "-export", "--", importPath) + cmd.Dir = srcDir + out, err := cmd.CombinedOutput() + if err != nil { + return "", "" + } + var data struct { + ImportPath string + Export string + } + json.Unmarshal(out, &data) + return data.Export, data.ImportPath +} + +// NewReader returns a reader for the export data section of an object +// (.o) or archive (.a) file read from r. The new reader may provide +// additional trailing data beyond the end of the export data. +func NewReader(r io.Reader) (io.Reader, error) { + buf := bufio.NewReader(r) + _, size, err := gcimporter.FindExportData(buf) + if err != nil { + return nil, err + } + + if size >= 0 { + // We were given an archive and found the __.PKGDEF in it. + // This tells us the size of the export data, and we don't + // need to return the entire file. + return &io.LimitedReader{ + R: buf, + N: size, + }, nil + } else { + // We were given an object file. As such, we don't know how large + // the export data is and must return the entire file. 
+ return buf, nil + } +} + +// readAll works the same way as io.ReadAll, but avoids allocations and copies +// by preallocating a byte slice of the necessary size if the size is known up +// front. This is always possible when the input is an archive. In that case, +// NewReader will return the known size using an io.LimitedReader. +func readAll(r io.Reader) ([]byte, error) { + if lr, ok := r.(*io.LimitedReader); ok { + data := make([]byte, lr.N) + _, err := io.ReadFull(lr, data) + return data, err + } + return io.ReadAll(r) +} + +// Read reads export data from in, decodes it, and returns type +// information for the package. +// +// The package path (effectively its linker symbol prefix) is +// specified by path, since unlike the package name, this information +// may not be recorded in the export data. +// +// File position information is added to fset. +// +// Read may inspect and add to the imports map to ensure that references +// within the export data to other packages are consistent. The caller +// must ensure that imports[path] does not exist, or exists but is +// incomplete (see types.Package.Complete), and Read inserts the +// resulting package into this map entry. +// +// On return, the state of the reader is undefined. +func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) { + data, err := readAll(in) + if err != nil { + return nil, fmt.Errorf("reading export data for %q: %v", path, err) + } + + if bytes.HasPrefix(data, []byte("!")) { + return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path) + } + + // The indexed export format starts with an 'i'; the older + // binary export format starts with a 'c', 'd', or 'v' + // (from "version"). Select appropriate importer. + if len(data) > 0 { + switch data[0] { + case 'i': + _, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path) + return pkg, err + + case 'v', 'c', 'd': + _, pkg, err := gcimporter.BImportData(fset, imports, data, path) + return pkg, err + + case 'u': + _, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path) + return pkg, err + + default: + l := len(data) + if l > 10 { + l = 10 + } + return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), path) + } + } + return nil, fmt.Errorf("empty export data for %s", path) +} + +// Write writes encoded type information for the specified package to out. +// The FileSet provides file position information for named objects. +func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error { + if _, err := io.WriteString(out, "i"); err != nil { + return err + } + return gcimporter.IExportData(out, fset, pkg) +} + +// ReadBundle reads an export bundle from in, decodes it, and returns type +// information for the packages. +// File position information is added to fset. +// +// ReadBundle may inspect and add to the imports map to ensure that references +// within the export bundle to other packages are consistent. +// +// On return, the state of the reader is undefined. +// +// Experimental: This API is experimental and may change in the future. 
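As a usage sketch of the Find, NewReader, and Read functions added above (illustrative only, not part of the vendored package; the "fmt" import path and "." source directory are arbitrary placeholders):

package main

import (
	"fmt"
	"go/token"
	"go/types"
	"log"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	// Locate export data via the go command (Find is deprecated in favour
	// of go/packages, as its doc comment notes, but still works).
	filename, path := gcexportdata.Find("fmt", ".")
	if filename == "" {
		log.Fatal("no export data found")
	}
	f, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Extract the export data section and decode it into a *types.Package.
	r, err := gcexportdata.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}
	fset := token.NewFileSet()
	imports := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, fset, imports, path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), len(pkg.Scope().Names()))
}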
+func ReadBundle(in io.Reader, fset *token.FileSet, imports map[string]*types.Package) ([]*types.Package, error) { + data, err := readAll(in) + if err != nil { + return nil, fmt.Errorf("reading export bundle: %v", err) + } + return gcimporter.IImportBundle(fset, imports, data) +} + +// WriteBundle writes encoded type information for the specified packages to out. +// The FileSet provides file position information for named objects. +// +// Experimental: This API is experimental and may change in the future. +func WriteBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { + return gcimporter.IExportBundle(out, fset, pkgs) +} diff --git a/vendor/golang.org/x/tools/go/gcexportdata/importer.go b/vendor/golang.org/x/tools/go/gcexportdata/importer.go new file mode 100644 index 0000000000000..37a7247e26867 --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcexportdata/importer.go @@ -0,0 +1,75 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcexportdata + +import ( + "fmt" + "go/token" + "go/types" + "os" +) + +// NewImporter returns a new instance of the types.Importer interface +// that reads type information from export data files written by gc. +// The Importer also satisfies types.ImporterFrom. +// +// Export data files are located using "go build" workspace conventions +// and the build.Default context. +// +// Use this importer instead of go/importer.For("gc", ...) to avoid the +// version-skew problems described in the documentation of this package, +// or to control the FileSet or access the imports map populated during +// package loading. +// +// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages, +// which is more efficient. +func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom { + return importer{fset, imports} +} + +type importer struct { + fset *token.FileSet + imports map[string]*types.Package +} + +func (imp importer) Import(importPath string) (*types.Package, error) { + return imp.ImportFrom(importPath, "", 0) +} + +func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) { + filename, path := Find(importPath, srcDir) + if filename == "" { + if importPath == "unsafe" { + // Even for unsafe, call Find first in case + // the package was vendored. + return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %s", importPath) + } + + if pkg, ok := imp.imports[path]; ok && pkg.Complete() { + return pkg, nil // cache hit + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + f.Close() + if err != nil { + // add file name to error + err = fmt.Errorf("reading export data: %s: %v", filename, err) + } + }() + + r, err := NewReader(f) + if err != nil { + return nil, err + } + + return Read(r, imp.fset, imp.imports, path) +} diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go new file mode 100644 index 0000000000000..18a002f82a1f9 --- /dev/null +++ b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go @@ -0,0 +1,49 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packagesdriver fetches type sizes for go/packages and go/analysis. 
+package packagesdriver + +import ( + "context" + "fmt" + "go/types" + "strings" + + "golang.org/x/tools/internal/gocommand" +) + +var debug = false + +func GetSizesGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (types.Sizes, error) { + inv.Verb = "list" + inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} + stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) + var goarch, compiler string + if rawErr != nil { + if rawErrMsg := rawErr.Error(); strings.Contains(rawErrMsg, "cannot find main module") || strings.Contains(rawErrMsg, "go.mod file not found") { + // User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc. + // TODO(matloob): Is this a problem in practice? + inv.Verb = "env" + inv.Args = []string{"GOARCH"} + envout, enverr := gocmdRunner.Run(ctx, inv) + if enverr != nil { + return nil, enverr + } + goarch = strings.TrimSpace(envout.String()) + compiler = "gc" + } else { + return nil, friendlyErr + } + } else { + fields := strings.Fields(stdout.String()) + if len(fields) < 2 { + return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \"<GOARCH> <compiler>\":\nstdout: <<%s>>\nstderr: <<%s>>", + stdout.String(), stderr.String()) + } + goarch = fields[0] + compiler = fields[1] + } + return types.SizesFor(compiler, goarch), nil +} diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go new file mode 100644 index 0000000000000..da4ab89fe63f1 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -0,0 +1,220 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package packages loads Go packages for inspection and analysis. + +The Load function takes as input a list of patterns and return a list of Package +structs describing individual packages matched by those patterns. +The LoadMode controls the amount of detail in the loaded packages. + +Load passes most patterns directly to the underlying build tool, +but all patterns with the prefix "query=", where query is a +non-empty string of letters from [a-z], are reserved and may be +interpreted as query operators. + +Two query operators are currently supported: "file" and "pattern". + +The query "file=path/to/file.go" matches the package or packages enclosing +the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go" +might return the packages "fmt" and "fmt [fmt.test]". + +The query "pattern=string" causes "string" to be passed directly to +the underlying build tool. In most cases this is unnecessary, +but an application can use Load("pattern=" + x) as an escaping mechanism +to ensure that x is not interpreted as a query operator if it contains '='. + +All other query operators are reserved for future use and currently +cause Load to report an error. + +The Package struct provides basic information about the package, including + + - ID, a unique identifier for the package in the returned set; + - GoFiles, the names of the package's Go source files; + - Imports, a map from source import strings to the Packages they name; + - Types, the type information for the package's exported symbols; + - Syntax, the parsed syntax trees for the package's source code; and + - TypeInfo, the result of a complete type-check of the package syntax trees.
+ +(See the documentation for type Package for the complete list of fields +and more detailed descriptions.) + +For example, + + Load(nil, "bytes", "unicode...") + +returns four Package structs describing the standard library packages +bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern +can match multiple packages and that a package might be matched by +multiple patterns: in general it is not possible to determine which +packages correspond to which patterns. + +Note that the list returned by Load contains only the packages matched +by the patterns. Their dependencies can be found by walking the import +graph using the Imports fields. + +The Load function can be configured by passing a pointer to a Config as +the first argument. A nil Config is equivalent to the zero Config, which +causes Load to run in LoadFiles mode, collecting minimal information. +See the documentation for type Config for details. + +As noted earlier, the Config.Mode controls the amount of detail +reported about the loaded packages. See the documentation for type LoadMode +for details. + +Most tools should pass their command-line arguments (after any flags) +uninterpreted to the loader, so that the loader can interpret them +according to the conventions of the underlying build system. +See the Example function for typical usage. +*/ +package packages // import "golang.org/x/tools/go/packages" + +/* + +Motivation and design considerations + +The new package's design solves problems addressed by two existing +packages: go/build, which locates and describes packages, and +golang.org/x/tools/go/loader, which loads, parses and type-checks them. +The go/build.Package structure encodes too much of the 'go build' way +of organizing projects, leaving us in need of a data type that describes a +package of Go source code independent of the underlying build system. +We wanted something that works equally well with go build and vgo, and +also other build systems such as Bazel and Blaze, making it possible to +construct analysis tools that work in all these environments. +Tools such as errcheck and staticcheck were essentially unavailable to +the Go community at Google, and some of Google's internal tools for Go +are unavailable externally. +This new package provides a uniform way to obtain package metadata by +querying each of these build systems, optionally supporting their +preferred command-line notations for packages, so that tools integrate +neatly with users' build environments. The Metadata query function +executes an external query tool appropriate to the current workspace. + +Loading packages always returns the complete import graph "all the way down", +even if all you want is information about a single package, because the query +mechanisms of all the build systems we currently support ({go,vgo} list, and +blaze/bazel aspect-based query) cannot provide detailed information +about one package without visiting all its dependencies too, so there is +no additional asymptotic cost to providing transitive information. +(This property might not be true of a hypothetical 5th build system.) + +In calls to TypeCheck, all initial packages, and any package that +transitively depends on one of them, must be loaded from source. +Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from +source; D may be loaded from export data, and E may not be loaded at all +(though it's possible that D's export data mentions it, so a +types.Package may be created for it and exposed.) 
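To make the query operators and LoadMode discussion above concrete, a minimal caller might look like the following (an editor's sketch, not part of the vendored file; the file path and the chosen mode bits are arbitrary):

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		// NeedName/NeedFiles/NeedImports keep the query cheap; add NeedTypes,
		// NeedSyntax and NeedTypesInfo when full type information is required.
		Mode: packages.NeedName | packages.NeedFiles | packages.NeedImports,
	}
	// "file=" finds the package(s) enclosing a source file; "pattern="
	// passes its argument verbatim to the underlying build tool.
	pkgs, err := packages.Load(cfg, "file=./main.go", "pattern=fmt")
	if err != nil {
		log.Fatal(err)
	}
	for _, pkg := range pkgs {
		fmt.Println(pkg.ID, pkg.GoFiles)
	}
}

Per-package problems are reported in each Package's Errors field rather than through Load's error return, so callers typically inspect both.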
+ +The old loader had a feature to suppress type-checking of function +bodies on a per-package basis, primarily intended to reduce the work of +obtaining type information for imported packages. Now that imports are +satisfied by export data, the optimization no longer seems necessary. + +Despite some early attempts, the old loader did not exploit export data, +instead always using the equivalent of WholeProgram mode. This was due +to the complexity of mixing source and export data packages (now +resolved by the upward traversal mentioned above), and because export data +files were nearly always missing or stale. Now that 'go build' supports +caching, all the underlying build systems can guarantee to produce +export data in a reasonable (amortized) time. + +Test "main" packages synthesized by the build system are now reported as +first-class packages, avoiding the need for clients (such as go/ssa) to +reinvent this generation logic. + +One way in which go/packages is simpler than the old loader is in its +treatment of in-package tests. In-package tests are packages that +consist of all the files of the library under test, plus the test files. +The old loader constructed in-package tests by a two-phase process of +mutation called "augmentation": first it would construct and type check +all the ordinary library packages and type-check the packages that +depend on them; then it would add more (test) files to the package and +type-check again. This two-phase approach had four major problems: +1) in processing the tests, the loader modified the library package, + leaving no way for a client application to see both the test + package and the library package; one would mutate into the other. +2) because test files can declare additional methods on types defined in + the library portion of the package, the dispatch of method calls in + the library portion was affected by the presence of the test files. + This should have been a clue that the packages were logically + different. +3) this model of "augmentation" assumed at most one in-package test + per library package, which is true of projects using 'go build', + but not other build systems. +4) because of the two-phase nature of test processing, all packages that + import the library package had to be processed before augmentation, + forcing a "one-shot" API and preventing the client from calling Load + in several times in sequence as is now possible in WholeProgram mode. + (TypeCheck mode has a similar one-shot restriction for a different reason.) + +Early drafts of this package supported "multi-shot" operation. +Although it allowed clients to make a sequence of calls (or concurrent +calls) to Load, building up the graph of Packages incrementally, +it was of marginal value: it complicated the API +(since it allowed some options to vary across calls but not others), +it complicated the implementation, +it cannot be made to work in Types mode, as explained above, +and it was less efficient than making one combined call (when this is possible). +Among the clients we have inspected, none made multiple calls to load +but could not be easily and satisfactorily modified to make only a single call. +However, applications changes may be required. +For example, the ssadump command loads the user-specified packages +and in addition the runtime package. It is tempting to simply append +"runtime" to the user-provided list, but that does not work if the user +specified an ad-hoc package such as [a.go b.go]. 
+Instead, ssadump no longer requests the runtime package, +but seeks it among the dependencies of the user-specified packages, +and emits an error if it is not found. + +Overlays: The Overlay field in the Config allows providing alternate contents +for Go source files, by providing a mapping from file path to contents. +go/packages will pull in new imports added in overlay files when go/packages +is run in LoadImports mode or greater. +Overlay support for the go list driver isn't complete yet: if the file doesn't +exist on disk, it will only be recognized in an overlay if it is a non-test file +and the package would be reported even without the overlay. + +Questions & Tasks + +- Add GOARCH/GOOS? + They are not portable concepts, but could be made portable. + Our goal has been to allow users to express themselves using the conventions + of the underlying build system: if the build system honors GOARCH + during a build and during a metadata query, then so should + applications built atop that query mechanism. + Conversely, if the target architecture of the build is determined by + command-line flags, the application can pass the relevant + flags through to the build system using a command such as: + myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin" + However, this approach is low-level, unwieldy, and non-portable. + GOOS and GOARCH seem important enough to warrant a dedicated option. + +- How should we handle partial failures such as a mixture of good and + malformed patterns, existing and non-existent packages, successful and + failed builds, import failures, import cycles, and so on, in a call to + Load? + +- Support bazel, blaze, and go1.10 list, not just go1.11 list. + +- Handle (and test) various partial success cases, e.g. + a mixture of good packages and: + invalid patterns + nonexistent packages + empty packages + packages with malformed package or import declarations + unreadable files + import cycles + other parse errors + type errors + Make sure we record errors at the correct place in the graph. + +- Missing packages among initial arguments are not reported. + Return bogus packages for them, like golist does. + +- "undeclared name" errors (for example) are reported out of source file + order. I suspect this is due to the breadth-first resolution now used + by go/types. Is that a bug? Discuss with gri. + +*/ diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go new file mode 100644 index 0000000000000..7242a0a7d2bef --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -0,0 +1,101 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file enables an external tool to intercept package requests. +// If the tool is present then its results are used in preference to +// the go list command. + +package packages + +import ( + "bytes" + "encoding/json" + "fmt" + exec "golang.org/x/sys/execabs" + "os" + "strings" +) + +// The Driver Protocol +// +// The driver, given the inputs to a call to Load, returns metadata about the packages specified. +// This allows for different build systems to support go/packages by telling go/packages how the +// packages' source is organized. +// The driver is a binary, either specified by the GOPACKAGESDRIVER environment variable or in +// the path as gopackagesdriver. It's given the inputs to load in its argv. 
See the package +// documentation in doc.go for the full description of the patterns that need to be supported. +// A driver receives as a JSON-serialized driverRequest struct in standard input and will +// produce a JSON-serialized driverResponse (see definition in packages.go) in its standard output. + +// driverRequest is used to provide the portion of Load's Config that is needed by a driver. +type driverRequest struct { + Mode LoadMode `json:"mode"` + // Env specifies the environment the underlying build system should be run in. + Env []string `json:"env"` + // BuildFlags are flags that should be passed to the underlying build system. + BuildFlags []string `json:"build_flags"` + // Tests specifies whether the patterns should also return test packages. + Tests bool `json:"tests"` + // Overlay maps file paths (relative to the driver's working directory) to the byte contents + // of overlay files. + Overlay map[string][]byte `json:"overlay"` +} + +// findExternalDriver returns the file path of a tool that supplies +// the build system package structure, or "" if not found." +// If GOPACKAGESDRIVER is set in the environment findExternalTool returns its +// value, otherwise it searches for a binary named gopackagesdriver on the PATH. +func findExternalDriver(cfg *Config) driver { + const toolPrefix = "GOPACKAGESDRIVER=" + tool := "" + for _, env := range cfg.Env { + if val := strings.TrimPrefix(env, toolPrefix); val != env { + tool = val + } + } + if tool != "" && tool == "off" { + return nil + } + if tool == "" { + var err error + tool, err = exec.LookPath("gopackagesdriver") + if err != nil { + return nil + } + } + return func(cfg *Config, words ...string) (*driverResponse, error) { + req, err := json.Marshal(driverRequest{ + Mode: cfg.Mode, + Env: cfg.Env, + BuildFlags: cfg.BuildFlags, + Tests: cfg.Tests, + Overlay: cfg.Overlay, + }) + if err != nil { + return nil, fmt.Errorf("failed to encode message to driver tool: %v", err) + } + + buf := new(bytes.Buffer) + stderr := new(bytes.Buffer) + cmd := exec.CommandContext(cfg.Context, tool, words...) + cmd.Dir = cfg.Dir + cmd.Env = cfg.Env + cmd.Stdin = bytes.NewReader(req) + cmd.Stdout = buf + cmd.Stderr = stderr + + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr) + } + if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" { + fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd), stderr) + } + + var response driverResponse + if err := json.Unmarshal(buf.Bytes(), &response); err != nil { + return nil, err + } + return &response, nil + } +} diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go new file mode 100644 index 0000000000000..6bb7168d2e343 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -0,0 +1,1173 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "go/types" + "io/ioutil" + "log" + "os" + "path" + "path/filepath" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "unicode" + + exec "golang.org/x/sys/execabs" + "golang.org/x/tools/go/internal/packagesdriver" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/packagesinternal" +) + +// debug controls verbose logging. 
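The driver protocol described in external.go above can be summarized with a skeleton driver (a hypothetical sketch, not part of the diff; only the Roots and Packages response fields that this diff's code reads are shown, and the full driverResponse schema lives in packages.go):

package main

import (
	"encoding/json"
	"os"
)

// request mirrors the driverRequest fields defined above; LoadMode is
// serialized as an integer.
type request struct {
	Mode       int               `json:"mode"`
	Env        []string          `json:"env"`
	BuildFlags []string          `json:"build_flags"`
	Tests      bool              `json:"tests"`
	Overlay    map[string][]byte `json:"overlay"`
}

func main() {
	// The request arrives as JSON on stdin; the patterns to resolve arrive
	// in os.Args[1:]. A real driver would query its build system here.
	var req request
	if err := json.NewDecoder(os.Stdin).Decode(&req); err != nil {
		os.Exit(1)
	}

	// Reply with a (deliberately empty) response on stdout.
	resp := map[string]interface{}{
		"Roots":    []string{},
		"Packages": []interface{}{},
	}
	_ = json.NewEncoder(os.Stdout).Encode(resp)
}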
+var debug, _ = strconv.ParseBool(os.Getenv("GOPACKAGESDEBUG")) + +// A goTooOldError reports that the go command +// found by exec.LookPath is too old to use the new go list behavior. +type goTooOldError struct { + error +} + +// responseDeduper wraps a driverResponse, deduplicating its contents. +type responseDeduper struct { + seenRoots map[string]bool + seenPackages map[string]*Package + dr *driverResponse +} + +func newDeduper() *responseDeduper { + return &responseDeduper{ + dr: &driverResponse{}, + seenRoots: map[string]bool{}, + seenPackages: map[string]*Package{}, + } +} + +// addAll fills in r with a driverResponse. +func (r *responseDeduper) addAll(dr *driverResponse) { + for _, pkg := range dr.Packages { + r.addPackage(pkg) + } + for _, root := range dr.Roots { + r.addRoot(root) + } + r.dr.GoVersion = dr.GoVersion +} + +func (r *responseDeduper) addPackage(p *Package) { + if r.seenPackages[p.ID] != nil { + return + } + r.seenPackages[p.ID] = p + r.dr.Packages = append(r.dr.Packages, p) +} + +func (r *responseDeduper) addRoot(id string) { + if r.seenRoots[id] { + return + } + r.seenRoots[id] = true + r.dr.Roots = append(r.dr.Roots, id) +} + +type golistState struct { + cfg *Config + ctx context.Context + + envOnce sync.Once + goEnvError error + goEnv map[string]string + + rootsOnce sync.Once + rootDirsError error + rootDirs map[string]string + + goVersionOnce sync.Once + goVersionError error + goVersion int // The X in Go 1.X. + + // vendorDirs caches the (non)existence of vendor directories. + vendorDirs map[string]bool +} + +// getEnv returns Go environment variables. Only specific variables are +// populated -- computing all of them is slow. +func (state *golistState) getEnv() (map[string]string, error) { + state.envOnce.Do(func() { + var b *bytes.Buffer + b, state.goEnvError = state.invokeGo("env", "-json", "GOMOD", "GOPATH") + if state.goEnvError != nil { + return + } + + state.goEnv = make(map[string]string) + decoder := json.NewDecoder(b) + if state.goEnvError = decoder.Decode(&state.goEnv); state.goEnvError != nil { + return + } + }) + return state.goEnv, state.goEnvError +} + +// mustGetEnv is a convenience function that can be used if getEnv has already succeeded. +func (state *golistState) mustGetEnv() map[string]string { + env, err := state.getEnv() + if err != nil { + panic(fmt.Sprintf("mustGetEnv: %v", err)) + } + return env +} + +// goListDriver uses the go list command to interpret the patterns and produce +// the build system package structure. +// See driver for more details. +func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { + // Make sure that any asynchronous go commands are killed when we return. + parentCtx := cfg.Context + if parentCtx == nil { + parentCtx = context.Background() + } + ctx, cancel := context.WithCancel(parentCtx) + defer cancel() + + response := newDeduper() + + state := &golistState{ + cfg: cfg, + ctx: ctx, + vendorDirs: map[string]bool{}, + } + + // Fill in response.Sizes asynchronously if necessary. + var sizeserr error + var sizeswg sync.WaitGroup + if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { + sizeswg.Add(1) + go func() { + var sizes types.Sizes + sizes, sizeserr = packagesdriver.GetSizesGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) + // types.SizesFor always returns nil or a *types.StdSizes. 
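+ // A failed assertion below simply leaves response.dr.Sizes nil; the ignored
+ // second result means the conversion can never panic.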
+ response.dr.Sizes, _ = sizes.(*types.StdSizes) + sizeswg.Done() + }() + } + + // Determine files requested in contains patterns + var containFiles []string + restPatterns := make([]string, 0, len(patterns)) + // Extract file= and other [querytype]= patterns. Report an error if querytype + // doesn't exist. +extractQueries: + for _, pattern := range patterns { + eqidx := strings.Index(pattern, "=") + if eqidx < 0 { + restPatterns = append(restPatterns, pattern) + } else { + query, value := pattern[:eqidx], pattern[eqidx+len("="):] + switch query { + case "file": + containFiles = append(containFiles, value) + case "pattern": + restPatterns = append(restPatterns, value) + case "": // not a reserved query + restPatterns = append(restPatterns, pattern) + default: + for _, rune := range query { + if rune < 'a' || rune > 'z' { // not a reserved query + restPatterns = append(restPatterns, pattern) + continue extractQueries + } + } + // Reject all other patterns containing "=" + return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern) + } + } + } + + // See if we have any patterns to pass through to go list. Zero initial + // patterns also requires a go list call, since it's the equivalent of + // ".". + if len(restPatterns) > 0 || len(patterns) == 0 { + dr, err := state.createDriverResponse(restPatterns...) + if err != nil { + return nil, err + } + response.addAll(dr) + } + + if len(containFiles) != 0 { + if err := state.runContainsQueries(response, containFiles); err != nil { + return nil, err + } + } + + // Only use go/packages' overlay processing if we're using a Go version + // below 1.16. Otherwise, go list handles it. + if goVersion, err := state.getGoVersion(); err == nil && goVersion < 16 { + modifiedPkgs, needPkgs, err := state.processGolistOverlay(response) + if err != nil { + return nil, err + } + + var containsCandidates []string + if len(containFiles) > 0 { + containsCandidates = append(containsCandidates, modifiedPkgs...) + containsCandidates = append(containsCandidates, needPkgs...) + } + if err := state.addNeededOverlayPackages(response, needPkgs); err != nil { + return nil, err + } + // Check candidate packages for containFiles. + if len(containFiles) > 0 { + for _, id := range containsCandidates { + pkg, ok := response.seenPackages[id] + if !ok { + response.addPackage(&Package{ + ID: id, + Errors: []Error{{ + Kind: ListError, + Msg: fmt.Sprintf("package %s expected but not seen", id), + }}, + }) + continue + } + for _, f := range containFiles { + for _, g := range pkg.GoFiles { + if sameFile(f, g) { + response.addRoot(id) + } + } + } + } + } + // Add root for any package that matches a pattern. This applies only to + // packages that are modified by overlays, since they are not added as + // roots automatically. + for _, pattern := range restPatterns { + match := matchPattern(pattern) + for _, pkgID := range modifiedPkgs { + pkg, ok := response.seenPackages[pkgID] + if !ok { + continue + } + if match(pkg.PkgPath) { + response.addRoot(pkg.ID) + } + } + } + } + + sizeswg.Wait() + if sizeserr != nil { + return nil, sizeserr + } + return response.dr, nil +} + +func (state *golistState) addNeededOverlayPackages(response *responseDeduper, pkgs []string) error { + if len(pkgs) == 0 { + return nil + } + dr, err := state.createDriverResponse(pkgs...) 
+ if err != nil { + return err + } + for _, pkg := range dr.Packages { + response.addPackage(pkg) + } + _, needPkgs, err := state.processGolistOverlay(response) + if err != nil { + return err + } + return state.addNeededOverlayPackages(response, needPkgs) +} + +func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error { + for _, query := range queries { + // TODO(matloob): Do only one query per directory. + fdir := filepath.Dir(query) + // Pass absolute path of directory to go list so that it knows to treat it as a directory, + // not a package path. + pattern, err := filepath.Abs(fdir) + if err != nil { + return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err) + } + dirResponse, err := state.createDriverResponse(pattern) + + // If there was an error loading the package, or no packages are returned, + // or the package is returned with errors, try to load the file as an + // ad-hoc package. + // Usually the error will appear in a returned package, but may not if we're + // in module mode and the ad-hoc is located outside a module. + if err != nil || len(dirResponse.Packages) == 0 || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 && + len(dirResponse.Packages[0].Errors) == 1 { + var queryErr error + if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil { + return err // return the original error + } + } + isRoot := make(map[string]bool, len(dirResponse.Roots)) + for _, root := range dirResponse.Roots { + isRoot[root] = true + } + for _, pkg := range dirResponse.Packages { + // Add any new packages to the main set + // We don't bother to filter packages that will be dropped by the changes of roots, + // that will happen anyway during graph construction outside this function. + // Over-reporting packages is not a problem. + response.addPackage(pkg) + // if the package was not a root one, it cannot have the file + if !isRoot[pkg.ID] { + continue + } + for _, pkgFile := range pkg.GoFiles { + if filepath.Base(query) == filepath.Base(pkgFile) { + response.addRoot(pkg.ID) + break + } + } + } + } + return nil +} + +// adhocPackage attempts to load or construct an ad-hoc package for a given +// query, if the original call to the driver produced inadequate results. +func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, error) { + response, err := state.createDriverResponse(query) + if err != nil { + return nil, err + } + // If we get nothing back from `go list`, + // try to make this file into its own ad-hoc package. + // TODO(rstambler): Should this check against the original response? + if len(response.Packages) == 0 { + response.Packages = append(response.Packages, &Package{ + ID: "command-line-arguments", + PkgPath: query, + GoFiles: []string{query}, + CompiledGoFiles: []string{query}, + Imports: make(map[string]*Package), + }) + response.Roots = append(response.Roots, "command-line-arguments") + } + // Handle special cases. + if len(response.Packages) == 1 { + // golang/go#33482: If this is a file= query for ad-hoc packages where + // the file only exists on an overlay, and exists outside of a module, + // add the file to the package and remove the errors. 
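+ // The fix is applied only to the ad-hoc "command-line-arguments" package, or to a
+ // package whose path matches the queried file, and only when no Go files were reported for it.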
+ if response.Packages[0].ID == "command-line-arguments" || + filepath.ToSlash(response.Packages[0].PkgPath) == filepath.ToSlash(query) { + if len(response.Packages[0].GoFiles) == 0 { + filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath + // TODO(matloob): check if the file is outside of a root dir? + for path := range state.cfg.Overlay { + if path == filename { + response.Packages[0].Errors = nil + response.Packages[0].GoFiles = []string{path} + response.Packages[0].CompiledGoFiles = []string{path} + } + } + } + } + } + return response, nil +} + +// Fields must match go list; +// see $GOROOT/src/cmd/go/internal/load/pkg.go. +type jsonPackage struct { + ImportPath string + Dir string + Name string + Export string + GoFiles []string + CompiledGoFiles []string + IgnoredGoFiles []string + IgnoredOtherFiles []string + EmbedPatterns []string + EmbedFiles []string + CFiles []string + CgoFiles []string + CXXFiles []string + MFiles []string + HFiles []string + FFiles []string + SFiles []string + SwigFiles []string + SwigCXXFiles []string + SysoFiles []string + Imports []string + ImportMap map[string]string + Deps []string + Module *Module + TestGoFiles []string + TestImports []string + XTestGoFiles []string + XTestImports []string + ForTest string // q in a "p [q.test]" package, else "" + DepOnly bool + + Error *packagesinternal.PackageError + DepsErrors []*packagesinternal.PackageError +} + +type jsonPackageError struct { + ImportStack []string + Pos string + Err string +} + +func otherFiles(p *jsonPackage) [][]string { + return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles} +} + +// createDriverResponse uses the "go list" command to expand the pattern +// words and return a response for the specified packages. +func (state *golistState) createDriverResponse(words ...string) (*driverResponse, error) { + // go list uses the following identifiers in ImportPath and Imports: + // + // "p" -- importable package or main (command) + // "q.test" -- q's test executable + // "p [q.test]" -- variant of p as built for q's test executable + // "q_test [q.test]" -- q's external test package + // + // The packages p that are built differently for a test q.test + // are q itself, plus any helpers used by the external test q_test, + // typically including "testing" and all its dependencies. + + // Run "go list" for complete + // information on the specified packages. + goVersion, err := state.getGoVersion() + if err != nil { + return nil, err + } + buf, err := state.invokeGo("list", golistargs(state.cfg, words, goVersion)...) + if err != nil { + return nil, err + } + + seen := make(map[string]*jsonPackage) + pkgs := make(map[string]*Package) + additionalErrors := make(map[string][]Error) + // Decode the JSON and convert it to Package form. + response := &driverResponse{ + GoVersion: goVersion, + } + for dec := json.NewDecoder(buf); dec.More(); { + p := new(jsonPackage) + if err := dec.Decode(p); err != nil { + return nil, fmt.Errorf("JSON decoding failed: %v", err) + } + + if p.ImportPath == "" { + // The documentation for go list says that “[e]rroneous packages will have + // a non-empty ImportPath”. If for some reason it comes back empty, we + // prefer to error out rather than silently discarding data or handing + // back a package without any way to refer to it. 
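+ // If go list attached an error to the nameless entry, surface that error directly;
+ // otherwise report that the package is missing an import path.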
+ if p.Error != nil { + return nil, Error{ + Pos: p.Error.Pos, + Msg: p.Error.Err, + } + } + return nil, fmt.Errorf("package missing import path: %+v", p) + } + + // Work around https://golang.org/issue/33157: + // go list -e, when given an absolute path, will find the package contained at + // that directory. But when no package exists there, it will return a fake package + // with an error and the ImportPath set to the absolute path provided to go list. + // Try to convert that absolute path to what its package path would be if it's + // contained in a known module or GOPATH entry. This will allow the package to be + // properly "reclaimed" when overlays are processed. + if filepath.IsAbs(p.ImportPath) && p.Error != nil { + pkgPath, ok, err := state.getPkgPath(p.ImportPath) + if err != nil { + return nil, err + } + if ok { + p.ImportPath = pkgPath + } + } + + if old, found := seen[p.ImportPath]; found { + // If one version of the package has an error, and the other doesn't, assume + // that this is a case where go list is reporting a fake dependency variant + // of the imported package: When a package tries to invalidly import another + // package, go list emits a variant of the imported package (with the same + // import path, but with an error on it, and the package will have a + // DepError set on it). An example of when this can happen is for imports of + // main packages: main packages can not be imported, but they may be + // separately matched and listed by another pattern. + // See golang.org/issue/36188 for more details. + + // The plan is that eventually, hopefully in Go 1.15, the error will be + // reported on the importing package rather than the duplicate "fake" + // version of the imported package. Once all supported versions of Go + // have the new behavior this logic can be deleted. + // TODO(matloob): delete the workaround logic once all supported versions of + // Go return the errors on the proper package. + + // There should be exactly one version of a package that doesn't have an + // error. + if old.Error == nil && p.Error == nil { + if !reflect.DeepEqual(p, old) { + return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath) + } + continue + } + + // Determine if this package's error needs to be bubbled up. + // This is a hack, and we expect for go list to eventually set the error + // on the package. + if old.Error != nil { + var errkind string + if strings.Contains(old.Error.Err, "not an importable package") { + errkind = "not an importable package" + } else if strings.Contains(old.Error.Err, "use of internal package") && strings.Contains(old.Error.Err, "not allowed") { + errkind = "use of internal package not allowed" + } + if errkind != "" { + if len(old.Error.ImportStack) < 1 { + return nil, fmt.Errorf(`internal error: go list gave a %q error with empty import stack`, errkind) + } + importingPkg := old.Error.ImportStack[len(old.Error.ImportStack)-1] + if importingPkg == old.ImportPath { + // Using an older version of Go which put this package itself on top of import + // stack, instead of the importer. Look for importer in second from top + // position. 
+ if len(old.Error.ImportStack) < 2 { + return nil, fmt.Errorf(`internal error: go list gave a %q error with an import stack without importing package`, errkind) + } + importingPkg = old.Error.ImportStack[len(old.Error.ImportStack)-2] + } + additionalErrors[importingPkg] = append(additionalErrors[importingPkg], Error{ + Pos: old.Error.Pos, + Msg: old.Error.Err, + Kind: ListError, + }) + } + } + + // Make sure that if there's a version of the package without an error, + // that's the one reported to the user. + if old.Error == nil { + continue + } + + // This package will replace the old one at the end of the loop. + } + seen[p.ImportPath] = p + + pkg := &Package{ + Name: p.Name, + ID: p.ImportPath, + GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), + CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), + OtherFiles: absJoin(p.Dir, otherFiles(p)...), + EmbedFiles: absJoin(p.Dir, p.EmbedFiles), + EmbedPatterns: absJoin(p.Dir, p.EmbedPatterns), + IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles), + forTest: p.ForTest, + depsErrors: p.DepsErrors, + Module: p.Module, + } + + if (state.cfg.Mode&typecheckCgo) != 0 && len(p.CgoFiles) != 0 { + if len(p.CompiledGoFiles) > len(p.GoFiles) { + // We need the cgo definitions, which are in the first + // CompiledGoFile after the non-cgo ones. This is a hack but there + // isn't currently a better way to find it. We also need the pure + // Go files and unprocessed cgo files, all of which are already + // in pkg.GoFiles. + cgoTypes := p.CompiledGoFiles[len(p.GoFiles)] + pkg.CompiledGoFiles = append([]string{cgoTypes}, pkg.GoFiles...) + } else { + // golang/go#38990: go list silently fails to do cgo processing + pkg.CompiledGoFiles = nil + pkg.Errors = append(pkg.Errors, Error{ + Msg: "go list failed to return CompiledGoFiles. This may indicate failure to perform cgo processing; try building at the command line. See https://golang.org/issue/38990.", + Kind: ListError, + }) + } + } + + // Work around https://golang.org/issue/28749: + // cmd/go puts assembly, C, and C++ files in CompiledGoFiles. + // Remove files from CompiledGoFiles that are non-go files + // (or are not files that look like they are from the cache). + if len(pkg.CompiledGoFiles) > 0 { + out := pkg.CompiledGoFiles[:0] + for _, f := range pkg.CompiledGoFiles { + if ext := filepath.Ext(f); ext != ".go" && ext != "" { // ext == "" means the file is from the cache, so probably cgo-processed file + continue + } + out = append(out, f) + } + pkg.CompiledGoFiles = out + } + + // Extract the PkgPath from the package's ID. + if i := strings.IndexByte(pkg.ID, ' '); i >= 0 { + pkg.PkgPath = pkg.ID[:i] + } else { + pkg.PkgPath = pkg.ID + } + + if pkg.PkgPath == "unsafe" { + pkg.GoFiles = nil // ignore fake unsafe.go file + } + + // Assume go list emits only absolute paths for Dir. + if p.Dir != "" && !filepath.IsAbs(p.Dir) { + log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir) + } + + if p.Export != "" && !filepath.IsAbs(p.Export) { + pkg.ExportFile = filepath.Join(p.Dir, p.Export) + } else { + pkg.ExportFile = p.Export + } + + // imports + // + // Imports contains the IDs of all imported packages. + // ImportsMap records (path, ID) only where they differ. 
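+ // Start from the full set of imported IDs; entries redirected by ImportMap are removed
+ // below, and whatever remains is treated as an identity import (path == ID).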
+ ids := make(map[string]bool) + for _, id := range p.Imports { + ids[id] = true + } + pkg.Imports = make(map[string]*Package) + for path, id := range p.ImportMap { + pkg.Imports[path] = &Package{ID: id} // non-identity import + delete(ids, id) + } + for id := range ids { + if id == "C" { + continue + } + + pkg.Imports[id] = &Package{ID: id} // identity import + } + if !p.DepOnly { + response.Roots = append(response.Roots, pkg.ID) + } + + // Work around for pre-go.1.11 versions of go list. + // TODO(matloob): they should be handled by the fallback. + // Can we delete this? + if len(pkg.CompiledGoFiles) == 0 { + pkg.CompiledGoFiles = pkg.GoFiles + } + + // Temporary work-around for golang/go#39986. Parse filenames out of + // error messages. This happens if there are unrecoverable syntax + // errors in the source, so we can't match on a specific error message. + if err := p.Error; err != nil && state.shouldAddFilenameFromError(p) { + addFilenameFromPos := func(pos string) bool { + split := strings.Split(pos, ":") + if len(split) < 1 { + return false + } + filename := strings.TrimSpace(split[0]) + if filename == "" { + return false + } + if !filepath.IsAbs(filename) { + filename = filepath.Join(state.cfg.Dir, filename) + } + info, _ := os.Stat(filename) + if info == nil { + return false + } + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, filename) + pkg.GoFiles = append(pkg.GoFiles, filename) + return true + } + found := addFilenameFromPos(err.Pos) + // In some cases, go list only reports the error position in the + // error text, not the error position. One such case is when the + // file's package name is a keyword (see golang.org/issue/39763). + if !found { + addFilenameFromPos(err.Err) + } + } + + if p.Error != nil { + msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363. + // Address golang.org/issue/35964 by appending import stack to error message. + if msg == "import cycle not allowed" && len(p.Error.ImportStack) != 0 { + msg += fmt.Sprintf(": import stack: %v", p.Error.ImportStack) + } + pkg.Errors = append(pkg.Errors, Error{ + Pos: p.Error.Pos, + Msg: msg, + Kind: ListError, + }) + } + + pkgs[pkg.ID] = pkg + } + + for id, errs := range additionalErrors { + if p, ok := pkgs[id]; ok { + p.Errors = append(p.Errors, errs...) + } + } + for _, pkg := range pkgs { + response.Packages = append(response.Packages, pkg) + } + sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID }) + + return response, nil +} + +func (state *golistState) shouldAddFilenameFromError(p *jsonPackage) bool { + if len(p.GoFiles) > 0 || len(p.CompiledGoFiles) > 0 { + return false + } + + goV, err := state.getGoVersion() + if err != nil { + return false + } + + // On Go 1.14 and earlier, only add filenames from errors if the import stack is empty. + // The import stack behaves differently for these versions than newer Go versions. + if goV < 15 { + return len(p.Error.ImportStack) == 0 + } + + // On Go 1.15 and later, only parse filenames out of error if there's no import stack, + // or the current package is at the top of the import stack. This is not guaranteed + // to work perfectly, but should avoid some cases where files in errors don't belong to this + // package. + return len(p.Error.ImportStack) == 0 || p.Error.ImportStack[len(p.Error.ImportStack)-1] == p.ImportPath +} + +// getGoVersion returns the effective minor version of the go command. 
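+// The version, along with any error, is computed at most once per golistState and cached.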
+func (state *golistState) getGoVersion() (int, error) { + state.goVersionOnce.Do(func() { + state.goVersion, state.goVersionError = gocommand.GoVersion(state.ctx, state.cfgInvocation(), state.cfg.gocmdRunner) + }) + return state.goVersion, state.goVersionError +} + +// getPkgPath finds the package path of a directory if it's relative to a root +// directory. +func (state *golistState) getPkgPath(dir string) (string, bool, error) { + absDir, err := filepath.Abs(dir) + if err != nil { + return "", false, err + } + roots, err := state.determineRootDirs() + if err != nil { + return "", false, err + } + + for rdir, rpath := range roots { + // Make sure that the directory is in the module, + // to avoid creating a path relative to another module. + if !strings.HasPrefix(absDir, rdir) { + continue + } + // TODO(matloob): This doesn't properly handle symlinks. + r, err := filepath.Rel(rdir, dir) + if err != nil { + continue + } + if rpath != "" { + // We choose only one root even though the directory even it can belong in multiple modules + // or GOPATH entries. This is okay because we only need to work with absolute dirs when a + // file is missing from disk, for instance when gopls calls go/packages in an overlay. + // Once the file is saved, gopls, or the next invocation of the tool will get the correct + // result straight from golist. + // TODO(matloob): Implement module tiebreaking? + return path.Join(rpath, filepath.ToSlash(r)), true, nil + } + return filepath.ToSlash(r), true, nil + } + return "", false, nil +} + +// absJoin absolutizes and flattens the lists of files. +func absJoin(dir string, fileses ...[]string) (res []string) { + for _, files := range fileses { + for _, file := range files { + if !filepath.IsAbs(file) { + file = filepath.Join(dir, file) + } + res = append(res, file) + } + } + return res +} + +func jsonFlag(cfg *Config, goVersion int) string { + if goVersion < 19 { + return "-json" + } + var fields []string + added := make(map[string]bool) + addFields := func(fs ...string) { + for _, f := range fs { + if !added[f] { + added[f] = true + fields = append(fields, f) + } + } + } + addFields("Name", "ImportPath", "Error") // These fields are always needed + if cfg.Mode&NeedFiles != 0 || cfg.Mode&NeedTypes != 0 { + addFields("Dir", "GoFiles", "IgnoredGoFiles", "IgnoredOtherFiles", "CFiles", + "CgoFiles", "CXXFiles", "MFiles", "HFiles", "FFiles", "SFiles", + "SwigFiles", "SwigCXXFiles", "SysoFiles") + if cfg.Tests { + addFields("TestGoFiles", "XTestGoFiles") + } + } + if cfg.Mode&NeedTypes != 0 { + // CompiledGoFiles seems to be required for the test case TestCgoNoSyntax, + // even when -compiled isn't passed in. + // TODO(#52435): Should we make the test ask for -compiled, or automatically + // request CompiledGoFiles in certain circumstances? + addFields("Dir", "CompiledGoFiles") + } + if cfg.Mode&NeedCompiledGoFiles != 0 { + addFields("Dir", "CompiledGoFiles", "Export") + } + if cfg.Mode&NeedImports != 0 { + // When imports are requested, DepOnly is used to distinguish between packages + // explicitly requested and transitive imports of those packages. + addFields("DepOnly", "Imports", "ImportMap") + if cfg.Tests { + addFields("TestImports", "XTestImports") + } + } + if cfg.Mode&NeedDeps != 0 { + addFields("DepOnly") + } + if usesExportData(cfg) { + // Request Dir in the unlikely case Export is not absolute. 
+ addFields("Dir", "Export") + } + if cfg.Mode&needInternalForTest != 0 { + addFields("ForTest") + } + if cfg.Mode&needInternalDepsErrors != 0 { + addFields("DepsErrors") + } + if cfg.Mode&NeedModule != 0 { + addFields("Module") + } + if cfg.Mode&NeedEmbedFiles != 0 { + addFields("EmbedFiles") + } + if cfg.Mode&NeedEmbedPatterns != 0 { + addFields("EmbedPatterns") + } + return "-json=" + strings.Join(fields, ",") +} + +func golistargs(cfg *Config, words []string, goVersion int) []string { + const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo + fullargs := []string{ + "-e", jsonFlag(cfg, goVersion), + fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypes|NeedTypesInfo|NeedTypesSizes) != 0), + fmt.Sprintf("-test=%t", cfg.Tests), + fmt.Sprintf("-export=%t", usesExportData(cfg)), + fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0), + // go list doesn't let you pass -test and -find together, + // probably because you'd just get the TestMain. + fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0 && !usesExportData(cfg)), + } + fullargs = append(fullargs, cfg.BuildFlags...) + fullargs = append(fullargs, "--") + fullargs = append(fullargs, words...) + return fullargs +} + +// cfgInvocation returns an Invocation that reflects cfg's settings. +func (state *golistState) cfgInvocation() gocommand.Invocation { + cfg := state.cfg + return gocommand.Invocation{ + BuildFlags: cfg.BuildFlags, + ModFile: cfg.modFile, + ModFlag: cfg.modFlag, + CleanEnv: cfg.Env != nil, + Env: cfg.Env, + Logf: cfg.Logf, + WorkingDir: cfg.Dir, + } +} + +// invokeGo returns the stdout of a go command invocation. +func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, error) { + cfg := state.cfg + + inv := state.cfgInvocation() + + // For Go versions 1.16 and above, `go list` accepts overlays directly via + // the -overlay flag. Set it, if it's available. + // + // The check for "list" is not necessarily required, but we should avoid + // getting the go version if possible. + if verb == "list" { + goVersion, err := state.getGoVersion() + if err != nil { + return nil, err + } + if goVersion >= 16 { + filename, cleanup, err := state.writeOverlays() + if err != nil { + return nil, err + } + defer cleanup() + inv.Overlay = filename + } + } + inv.Verb = verb + inv.Args = args + gocmdRunner := cfg.gocmdRunner + if gocmdRunner == nil { + gocmdRunner = &gocommand.Runner{} + } + stdout, stderr, friendlyErr, err := gocmdRunner.RunRaw(cfg.Context, inv) + if err != nil { + // Check for 'go' executable not being found. + if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { + return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound) + } + + exitErr, ok := err.(*exec.ExitError) + if !ok { + // Catastrophic error: + // - context cancellation + return nil, fmt.Errorf("couldn't run 'go': %w", err) + } + + // Old go version? + if strings.Contains(stderr.String(), "flag provided but not defined") { + return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)} + } + + // Related to #24854 + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "unexpected directory layout") { + return nil, friendlyErr + } + + // Is there an error running the C compiler in cgo? This will be reported in the "Error" field + // and should be suppressed by go list -e. + // + // This condition is not perfect yet because the error message can include other error messages than runtime/cgo. 
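+ // isPkgPathRune reports whether r may appear in an import path. It is used below to
+ // recognize a "# <package>" header in stderr (typical of cgo or compiler output) that
+ // should not be treated as a go list failure.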
+ isPkgPathRune := func(r rune) bool { + // From https://golang.org/ref/spec#Import_declarations: + // Implementation restriction: A compiler may restrict ImportPaths to non-empty strings + // using only characters belonging to Unicode's L, M, N, P, and S general categories + // (the Graphic characters without spaces) and may also exclude the + // characters !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character U+FFFD. + return unicode.IsOneOf([]*unicode.RangeTable{unicode.L, unicode.M, unicode.N, unicode.P, unicode.S}, r) && + !strings.ContainsRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r) + } + // golang/go#36770: Handle case where cmd/go prints module download messages before the error. + msg := stderr.String() + for strings.HasPrefix(msg, "go: downloading") { + msg = msg[strings.IndexRune(msg, '\n')+1:] + } + if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") { + msg := msg[len("# "):] + if strings.HasPrefix(strings.TrimLeftFunc(msg, isPkgPathRune), "\n") { + return stdout, nil + } + // Treat pkg-config errors as a special case (golang.org/issue/36770). + if strings.HasPrefix(msg, "pkg-config") { + return stdout, nil + } + } + + // This error only appears in stderr. See golang.org/cl/166398 for a fix in go list to show + // the error in the Err section of stdout in case -e option is provided. + // This fix is provided for backwards compatibility. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Similar to the previous error, but currently lacks a fix in Go. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must all be in one directory") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Backwards compatibility for Go 1.11 because 1.12 and 1.13 put the directory in the ImportPath. + // If the package doesn't exist, put the absolute path of the directory into the error message, + // as Go 1.13 list does. + const noSuchDirectory = "no such directory" + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), noSuchDirectory) { + errstr := stderr.String() + abspath := strings.TrimSpace(errstr[strings.Index(errstr, noSuchDirectory)+len(noSuchDirectory):]) + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + abspath, strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist. + // Note that the error message we look for in this case is different that the one looked for above. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") { + output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Workaround for #34273. go list -e with GO111MODULE=on has incorrect behavior when listing a + // directory outside any module. 
+ if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside available modules") { + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + // TODO(matloob): command-line-arguments isn't correct here. + "command-line-arguments", strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Another variation of the previous error + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside module root") { + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + // TODO(matloob): command-line-arguments isn't correct here. + "command-line-arguments", strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + + // Workaround for an instance of golang.org/issue/26755: go list -e will return a non-zero exit + // status if there's a dependency on a package that doesn't exist. But it should return + // a zero exit status and set an error on that package. + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no Go files in") { + // Don't clobber stdout if `go list` actually returned something. + if len(stdout.String()) > 0 { + return stdout, nil + } + // try to extract package name from string + stderrStr := stderr.String() + var importPath string + colon := strings.Index(stderrStr, ":") + if colon > 0 && strings.HasPrefix(stderrStr, "go build ") { + importPath = stderrStr[len("go build "):colon] + } + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + importPath, strings.Trim(stderrStr, "\n")) + return bytes.NewBufferString(output), nil + } + + // Export mode entails a build. + // If that build fails, errors appear on stderr + // (despite the -e flag) and the Export field is blank. + // Do not fail in that case. + // The same is true if an ad-hoc package given to go list doesn't exist. + // TODO(matloob): Remove these once we can depend on go list to exit with a zero status with -e even when + // packages don't exist or a build fails. + if !usesExportData(cfg) && !containsGoFile(args) { + return nil, friendlyErr + } + } + return stdout, nil +} + +// OverlayJSON is the format overlay files are expected to be in. +// The Replace map maps from overlaid paths to replacement paths: +// the Go command will forward all reads trying to open +// each overlaid path to its replacement path, or consider the overlaid +// path not to exist if the replacement path is empty. +// +// From golang/go#39958. +type OverlayJSON struct { + Replace map[string]string `json:"replace,omitempty"` +} + +// writeOverlays writes out files for go list's -overlay flag, as described +// above. +func (state *golistState) writeOverlays() (filename string, cleanup func(), err error) { + // Do nothing if there are no overlays in the config. + if len(state.cfg.Overlay) == 0 { + return "", func() {}, nil + } + dir, err := ioutil.TempDir("", "gopackages-*") + if err != nil { + return "", nil, err + } + // The caller must clean up this directory, unless this function returns an + // error. + cleanup = func() { + os.RemoveAll(dir) + } + defer func() { + if err != nil { + cleanup() + } + }() + overlays := map[string]string{} + for k, v := range state.cfg.Overlay { + // Create a unique filename for the overlaid files, to avoid + // creating nested directories. 
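+ // For example, an overlay entry for /tmp/foo/bar.go ends up in a temporary file
+ // named like "<random>-tmpfoobar.go" inside dir.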
+ noSeparator := strings.Join(strings.Split(filepath.ToSlash(k), "/"), "") + f, err := ioutil.TempFile(dir, fmt.Sprintf("*-%s", noSeparator)) + if err != nil { + return "", func() {}, err + } + if _, err := f.Write(v); err != nil { + return "", func() {}, err + } + if err := f.Close(); err != nil { + return "", func() {}, err + } + overlays[k] = f.Name() + } + b, err := json.Marshal(OverlayJSON{Replace: overlays}) + if err != nil { + return "", func() {}, err + } + // Write out the overlay file that contains the filepath mappings. + filename = filepath.Join(dir, "overlay.json") + if err := ioutil.WriteFile(filename, b, 0665); err != nil { + return "", func() {}, err + } + return filename, cleanup, nil +} + +func containsGoFile(s []string) bool { + for _, f := range s { + if strings.HasSuffix(f, ".go") { + return true + } + } + return false +} + +func cmdDebugStr(cmd *exec.Cmd) string { + env := make(map[string]string) + for _, kv := range cmd.Env { + split := strings.SplitN(kv, "=", 2) + k, v := split[0], split[1] + env[k] = v + } + + var args []string + for _, arg := range cmd.Args { + quoted := strconv.Quote(arg) + if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") { + args = append(args, quoted) + } else { + args = append(args, arg) + } + } + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) +} diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go new file mode 100644 index 0000000000000..9576b472f9cc5 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -0,0 +1,575 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "encoding/json" + "fmt" + "go/parser" + "go/token" + "os" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + + "golang.org/x/tools/internal/gocommand" +) + +// processGolistOverlay provides rudimentary support for adding +// files that don't exist on disk to an overlay. The results can be +// sometimes incorrect. +// TODO(matloob): Handle unsupported cases, including the following: +// - determining the correct package to add given a new import path +func (state *golistState) processGolistOverlay(response *responseDeduper) (modifiedPkgs, needPkgs []string, err error) { + havePkgs := make(map[string]string) // importPath -> non-test package ID + needPkgsSet := make(map[string]bool) + modifiedPkgsSet := make(map[string]bool) + + pkgOfDir := make(map[string][]*Package) + for _, pkg := range response.dr.Packages { + // This is an approximation of import path to id. This can be + // wrong for tests, vendored packages, and a number of other cases. + havePkgs[pkg.PkgPath] = pkg.ID + dir, err := commonDir(pkg.GoFiles) + if err != nil { + return nil, nil, err + } + if dir != "" { + pkgOfDir[dir] = append(pkgOfDir[dir], pkg) + } + } + + // If no new imports are added, it is safe to avoid loading any needPkgs. + // Otherwise, it's hard to tell which package is actually being loaded + // (due to vendoring) and whether any modified package will show up + // in the transitive set of dependencies (because new imports are added, + // potentially modifying the transitive set of dependencies). 
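+ // overlayAddsImports is set below only when an overlay file introduces an import that
+ // its package did not already have; needPkgs is populated only in that case.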
+ var overlayAddsImports bool + + // If both a package and its test package are created by the overlay, we + // need the real package first. Process all non-test files before test + // files, and make the whole process deterministic while we're at it. + var overlayFiles []string + for opath := range state.cfg.Overlay { + overlayFiles = append(overlayFiles, opath) + } + sort.Slice(overlayFiles, func(i, j int) bool { + iTest := strings.HasSuffix(overlayFiles[i], "_test.go") + jTest := strings.HasSuffix(overlayFiles[j], "_test.go") + if iTest != jTest { + return !iTest // non-tests are before tests. + } + return overlayFiles[i] < overlayFiles[j] + }) + for _, opath := range overlayFiles { + contents := state.cfg.Overlay[opath] + base := filepath.Base(opath) + dir := filepath.Dir(opath) + var pkg *Package // if opath belongs to both a package and its test variant, this will be the test variant + var testVariantOf *Package // if opath is a test file, this is the package it is testing + var fileExists bool + isTestFile := strings.HasSuffix(opath, "_test.go") + pkgName, ok := extractPackageName(opath, contents) + if !ok { + // Don't bother adding a file that doesn't even have a parsable package statement + // to the overlay. + continue + } + // If all the overlay files belong to a different package, change the + // package name to that package. + maybeFixPackageName(pkgName, isTestFile, pkgOfDir[dir]) + nextPackage: + for _, p := range response.dr.Packages { + if pkgName != p.Name && p.ID != "command-line-arguments" { + continue + } + for _, f := range p.GoFiles { + if !sameFile(filepath.Dir(f), dir) { + continue + } + // Make sure to capture information on the package's test variant, if needed. + if isTestFile && !hasTestFiles(p) { + // TODO(matloob): Are there packages other than the 'production' variant + // of a package that this can match? This shouldn't match the test main package + // because the file is generated in another directory. + testVariantOf = p + continue nextPackage + } else if !isTestFile && hasTestFiles(p) { + // We're examining a test variant, but the overlaid file is + // a non-test file. Because the overlay implementation + // (currently) only adds a file to one package, skip this + // package, so that we can add the file to the production + // variant of the package. (https://golang.org/issue/36857 + // tracks handling overlays on both the production and test + // variant of a package). + continue nextPackage + } + if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath { + // We have already seen the production version of the + // for which p is a test variant. + if hasTestFiles(p) { + testVariantOf = pkg + } + } + pkg = p + if filepath.Base(f) == base { + fileExists = true + } + } + } + // The overlay could have included an entirely new package or an + // ad-hoc package. An ad-hoc package is one that we have manually + // constructed from inadequate `go list` results for a file= query. + // It will have the ID command-line-arguments. + if pkg == nil || pkg.ID == "command-line-arguments" { + // Try to find the module or gopath dir the file is contained in. + // Then for modules, add the module opath to the beginning. 
+ pkgPath, ok, err := state.getPkgPath(dir) + if err != nil { + return nil, nil, err + } + if !ok { + break + } + var forTest string // only set for x tests + isXTest := strings.HasSuffix(pkgName, "_test") + if isXTest { + forTest = pkgPath + pkgPath += "_test" + } + id := pkgPath + if isTestFile { + if isXTest { + id = fmt.Sprintf("%s [%s.test]", pkgPath, forTest) + } else { + id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath) + } + } + if pkg != nil { + // TODO(rstambler): We should change the package's path and ID + // here. The only issue is that this messes with the roots. + } else { + // Try to reclaim a package with the same ID, if it exists in the response. + for _, p := range response.dr.Packages { + if reclaimPackage(p, id, opath, contents) { + pkg = p + break + } + } + // Otherwise, create a new package. + if pkg == nil { + pkg = &Package{ + PkgPath: pkgPath, + ID: id, + Name: pkgName, + Imports: make(map[string]*Package), + } + response.addPackage(pkg) + havePkgs[pkg.PkgPath] = id + // Add the production package's sources for a test variant. + if isTestFile && !isXTest && testVariantOf != nil { + pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...) + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...) + // Add the package under test and its imports to the test variant. + pkg.forTest = testVariantOf.PkgPath + for k, v := range testVariantOf.Imports { + pkg.Imports[k] = &Package{ID: v.ID} + } + } + if isXTest { + pkg.forTest = forTest + } + } + } + } + if !fileExists { + pkg.GoFiles = append(pkg.GoFiles, opath) + // TODO(matloob): Adding the file to CompiledGoFiles can exhibit the wrong behavior + // if the file will be ignored due to its build tags. + pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, opath) + modifiedPkgsSet[pkg.ID] = true + } + imports, err := extractImports(opath, contents) + if err != nil { + // Let the parser or type checker report errors later. + continue + } + for _, imp := range imports { + // TODO(rstambler): If the package is an x test and the import has + // a test variant, make sure to replace it. + if _, found := pkg.Imports[imp]; found { + continue + } + overlayAddsImports = true + id, ok := havePkgs[imp] + if !ok { + var err error + id, err = state.resolveImport(dir, imp) + if err != nil { + return nil, nil, err + } + } + pkg.Imports[imp] = &Package{ID: id} + // Add dependencies to the non-test variant version of this package as well. + if testVariantOf != nil { + testVariantOf.Imports[imp] = &Package{ID: id} + } + } + } + + // toPkgPath guesses the package path given the id. + toPkgPath := func(sourceDir, id string) (string, error) { + if i := strings.IndexByte(id, ' '); i >= 0 { + return state.resolveImport(sourceDir, id[:i]) + } + return state.resolveImport(sourceDir, id) + } + + // Now that new packages have been created, do another pass to determine + // the new set of missing packages. 
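+ // A package path is considered missing if, after resolving each import's ID, it has
+ // no entry in havePkgs.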
+ for _, pkg := range response.dr.Packages { + for _, imp := range pkg.Imports { + if len(pkg.GoFiles) == 0 { + return nil, nil, fmt.Errorf("cannot resolve imports for package %q with no Go files", pkg.PkgPath) + } + pkgPath, err := toPkgPath(filepath.Dir(pkg.GoFiles[0]), imp.ID) + if err != nil { + return nil, nil, err + } + if _, ok := havePkgs[pkgPath]; !ok { + needPkgsSet[pkgPath] = true + } + } + } + + if overlayAddsImports { + needPkgs = make([]string, 0, len(needPkgsSet)) + for pkg := range needPkgsSet { + needPkgs = append(needPkgs, pkg) + } + } + modifiedPkgs = make([]string, 0, len(modifiedPkgsSet)) + for pkg := range modifiedPkgsSet { + modifiedPkgs = append(modifiedPkgs, pkg) + } + return modifiedPkgs, needPkgs, err +} + +// resolveImport finds the ID of a package given its import path. +// In particular, it will find the right vendored copy when in GOPATH mode. +func (state *golistState) resolveImport(sourceDir, importPath string) (string, error) { + env, err := state.getEnv() + if err != nil { + return "", err + } + if env["GOMOD"] != "" { + return importPath, nil + } + + searchDir := sourceDir + for { + vendorDir := filepath.Join(searchDir, "vendor") + exists, ok := state.vendorDirs[vendorDir] + if !ok { + info, err := os.Stat(vendorDir) + exists = err == nil && info.IsDir() + state.vendorDirs[vendorDir] = exists + } + + if exists { + vendoredPath := filepath.Join(vendorDir, importPath) + if info, err := os.Stat(vendoredPath); err == nil && info.IsDir() { + // We should probably check for .go files here, but shame on anyone who fools us. + path, ok, err := state.getPkgPath(vendoredPath) + if err != nil { + return "", err + } + if ok { + return path, nil + } + } + } + + // We know we've hit the top of the filesystem when we Dir / and get /, + // or C:\ and get C:\, etc. + next := filepath.Dir(searchDir) + if next == searchDir { + break + } + searchDir = next + } + return importPath, nil +} + +func hasTestFiles(p *Package) bool { + for _, f := range p.GoFiles { + if strings.HasSuffix(f, "_test.go") { + return true + } + } + return false +} + +// determineRootDirs returns a mapping from absolute directories that could +// contain code to their corresponding import path prefixes. +func (state *golistState) determineRootDirs() (map[string]string, error) { + env, err := state.getEnv() + if err != nil { + return nil, err + } + if env["GOMOD"] != "" { + state.rootsOnce.Do(func() { + state.rootDirs, state.rootDirsError = state.determineRootDirsModules() + }) + } else { + state.rootsOnce.Do(func() { + state.rootDirs, state.rootDirsError = state.determineRootDirsGOPATH() + }) + } + return state.rootDirs, state.rootDirsError +} + +func (state *golistState) determineRootDirsModules() (map[string]string, error) { + // List all of the modules--the first will be the directory for the main + // module. Any replaced modules will also need to be treated as roots. + // Editing files in the module cache isn't a great idea, so we don't + // plan to ever support that. + out, err := state.invokeGo("list", "-m", "-json", "all") + if err != nil { + // 'go list all' will fail if we're outside of a module and + // GO111MODULE=on. Try falling back without 'all'. 
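+ // If the fallback fails as well, report the original error rather than the fallback's.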
+ var innerErr error + out, innerErr = state.invokeGo("list", "-m", "-json") + if innerErr != nil { + return nil, err + } + } + roots := map[string]string{} + modules := map[string]string{} + var i int + for dec := json.NewDecoder(out); dec.More(); { + mod := new(gocommand.ModuleJSON) + if err := dec.Decode(mod); err != nil { + return nil, err + } + if mod.Dir != "" && mod.Path != "" { + // This is a valid module; add it to the map. + absDir, err := filepath.Abs(mod.Dir) + if err != nil { + return nil, err + } + modules[absDir] = mod.Path + // The first result is the main module. + if i == 0 || mod.Replace != nil && mod.Replace.Path != "" { + roots[absDir] = mod.Path + } + } + i++ + } + return roots, nil +} + +func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) { + m := map[string]string{} + for _, dir := range filepath.SplitList(state.mustGetEnv()["GOPATH"]) { + absDir, err := filepath.Abs(dir) + if err != nil { + return nil, err + } + m[filepath.Join(absDir, "src")] = "" + } + return m, nil +} + +func extractImports(filename string, contents []byte) ([]string, error) { + f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.ImportsOnly) // TODO(matloob): reuse fileset? + if err != nil { + return nil, err + } + var res []string + for _, imp := range f.Imports { + quotedPath := imp.Path.Value + path, err := strconv.Unquote(quotedPath) + if err != nil { + return nil, err + } + res = append(res, path) + } + return res, nil +} + +// reclaimPackage attempts to reuse a package that failed to load in an overlay. +// +// If the package has errors and has no Name, GoFiles, or Imports, +// then it's possible that it doesn't yet exist on disk. +func reclaimPackage(pkg *Package, id string, filename string, contents []byte) bool { + // TODO(rstambler): Check the message of the actual error? + // It differs between $GOPATH and module mode. + if pkg.ID != id { + return false + } + if len(pkg.Errors) != 1 { + return false + } + if pkg.Name != "" || pkg.ExportFile != "" { + return false + } + if len(pkg.GoFiles) > 0 || len(pkg.CompiledGoFiles) > 0 || len(pkg.OtherFiles) > 0 { + return false + } + if len(pkg.Imports) > 0 { + return false + } + pkgName, ok := extractPackageName(filename, contents) + if !ok { + return false + } + pkg.Name = pkgName + pkg.Errors = nil + return true +} + +func extractPackageName(filename string, contents []byte) (string, bool) { + // TODO(rstambler): Check the message of the actual error? + // It differs between $GOPATH and module mode. + f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.PackageClauseOnly) // TODO(matloob): reuse fileset? + if err != nil { + return "", false + } + return f.Name.Name, true +} + +// commonDir returns the directory that all files are in, "" if files is empty, +// or an error if they aren't in the same directory. +func commonDir(files []string) (string, error) { + seen := make(map[string]bool) + for _, f := range files { + seen[filepath.Dir(f)] = true + } + if len(seen) > 1 { + return "", fmt.Errorf("files (%v) are in more than one directory: %v", files, seen) + } + for k := range seen { + // seen has only one element; return it. + return k, nil + } + return "", nil // no files +} + +// It is possible that the files in the disk directory dir have a different package +// name from newName, which is deduced from the overlays. If they all have a different +// package name, and they all have the same package name, then that name becomes +// the package name. 
+// It returns true if it changes the package name, false otherwise. +func maybeFixPackageName(newName string, isTestFile bool, pkgsOfDir []*Package) { + names := make(map[string]int) + for _, p := range pkgsOfDir { + names[p.Name]++ + } + if len(names) != 1 { + // some files are in different packages + return + } + var oldName string + for k := range names { + oldName = k + } + if newName == oldName { + return + } + // We might have a case where all of the package names in the directory are + // the same, but the overlay file is for an x test, which belongs to its + // own package. If the x test does not yet exist on disk, we may not yet + // have its package name on disk, but we should not rename the packages. + // + // We use a heuristic to determine if this file belongs to an x test: + // The test file should have a package name whose package name has a _test + // suffix or looks like "newName_test". + maybeXTest := strings.HasPrefix(oldName+"_test", newName) || strings.HasSuffix(newName, "_test") + if isTestFile && maybeXTest { + return + } + for _, p := range pkgsOfDir { + p.Name = newName + } +} + +// This function is copy-pasted from +// https://github.com/golang/go/blob/9706f510a5e2754595d716bd64be8375997311fb/src/cmd/go/internal/search/search.go#L360. +// It should be deleted when we remove support for overlays from go/packages. +// +// NOTE: This does not handle any ./... or ./ style queries, as this function +// doesn't know the working directory. +// +// matchPattern(pattern)(name) reports whether +// name matches pattern. Pattern is a limited glob +// pattern in which '...' means 'any string' and there +// is no other special syntax. +// Unfortunately, there are two special cases. Quoting "go help packages": +// +// First, /... at the end of the pattern can match an empty string, +// so that net/... matches both net and packages in its subdirectories, like net/http. +// Second, any slash-separated pattern element containing a wildcard never +// participates in a match of the "vendor" element in the path of a vendored +// package, so that ./... does not match packages in subdirectories of +// ./vendor or ./mycode/vendor, but ./vendor/... and ./mycode/vendor/... do. +// Note, however, that a directory named vendor that itself contains code +// is not a vendored package: cmd/vendor would be a command named vendor, +// and the pattern cmd/... matches it. +func matchPattern(pattern string) func(name string) bool { + // Convert pattern to regular expression. + // The strategy for the trailing /... is to nest it in an explicit ? expression. + // The strategy for the vendor exclusion is to change the unmatchable + // vendor strings to a disallowed code point (vendorChar) and to use + // "(anything but that codepoint)*" as the implementation of the ... wildcard. + // This is a bit complicated but the obvious alternative, + // namely a hand-written search like in most shell glob matchers, + // is too easy to make accidentally exponential. + // Using package regexp guarantees linear-time matching. 
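+ // vendorChar is the NUL character, which cannot appear in a valid import path, so it
+ // can never collide with a real path element.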
+ + const vendorChar = "\x00" + + if strings.Contains(pattern, vendorChar) { + return func(name string) bool { return false } + } + + re := regexp.QuoteMeta(pattern) + re = replaceVendor(re, vendorChar) + switch { + case strings.HasSuffix(re, `/`+vendorChar+`/\.\.\.`): + re = strings.TrimSuffix(re, `/`+vendorChar+`/\.\.\.`) + `(/vendor|/` + vendorChar + `/\.\.\.)` + case re == vendorChar+`/\.\.\.`: + re = `(/vendor|/` + vendorChar + `/\.\.\.)` + case strings.HasSuffix(re, `/\.\.\.`): + re = strings.TrimSuffix(re, `/\.\.\.`) + `(/\.\.\.)?` + } + re = strings.ReplaceAll(re, `\.\.\.`, `[^`+vendorChar+`]*`) + + reg := regexp.MustCompile(`^` + re + `$`) + + return func(name string) bool { + if strings.Contains(name, vendorChar) { + return false + } + return reg.MatchString(replaceVendor(name, vendorChar)) + } +} + +// replaceVendor returns the result of replacing +// non-trailing vendor path elements in x with repl. +func replaceVendor(x, repl string) string { + if !strings.Contains(x, "vendor") { + return x + } + elem := strings.Split(x, "/") + for i := 0; i < len(elem)-1; i++ { + if elem[i] == "vendor" { + elem[i] = repl + } + } + return strings.Join(elem, "/") +} diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go new file mode 100644 index 0000000000000..5c080d21b54a9 --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -0,0 +1,57 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "fmt" + "strings" +) + +var allModes = []LoadMode{ + NeedName, + NeedFiles, + NeedCompiledGoFiles, + NeedImports, + NeedDeps, + NeedExportFile, + NeedTypes, + NeedSyntax, + NeedTypesInfo, + NeedTypesSizes, +} + +var modeStrings = []string{ + "NeedName", + "NeedFiles", + "NeedCompiledGoFiles", + "NeedImports", + "NeedDeps", + "NeedExportFile", + "NeedTypes", + "NeedSyntax", + "NeedTypesInfo", + "NeedTypesSizes", +} + +func (mod LoadMode) String() string { + m := mod + if m == 0 { + return "LoadMode(0)" + } + var out []string + for i, x := range allModes { + if x > m { + break + } + if (m & x) != 0 { + out = append(out, modeStrings[i]) + m = m ^ x + } + } + if m != 0 { + out = append(out, "Unknown") + } + return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|")) +} diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go new file mode 100644 index 0000000000000..1d5f0e45b2bbf --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -0,0 +1,1318 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +// See doc.go for package documentation and implementation notes. + +import ( + "context" + "encoding/json" + "fmt" + "go/ast" + "go/parser" + "go/scanner" + "go/token" + "go/types" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + "golang.org/x/tools/go/gcexportdata" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/packagesinternal" + "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/typesinternal" +) + +// A LoadMode controls the amount of detail to return when loading. +// The bits below can be combined to specify which fields should be +// filled in the result packages. 
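+// For example, NeedName|NeedFiles|NeedImports requests package names, file lists,
+// and direct imports, without any type information.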
+// The zero value is a special case, equivalent to combining +// the NeedName, NeedFiles, and NeedCompiledGoFiles bits. +// ID and Errors (if present) will always be filled. +// Load may return more information than requested. +type LoadMode int + +const ( + // NeedName adds Name and PkgPath. + NeedName LoadMode = 1 << iota + + // NeedFiles adds GoFiles and OtherFiles. + NeedFiles + + // NeedCompiledGoFiles adds CompiledGoFiles. + NeedCompiledGoFiles + + // NeedImports adds Imports. If NeedDeps is not set, the Imports field will contain + // "placeholder" Packages with only the ID set. + NeedImports + + // NeedDeps adds the fields requested by the LoadMode in the packages in Imports. + NeedDeps + + // NeedExportFile adds ExportFile. + NeedExportFile + + // NeedTypes adds Types, Fset, and IllTyped. + NeedTypes + + // NeedSyntax adds Syntax. + NeedSyntax + + // NeedTypesInfo adds TypesInfo. + NeedTypesInfo + + // NeedTypesSizes adds TypesSizes. + NeedTypesSizes + + // needInternalDepsErrors adds the internal deps errors field for use by gopls. + needInternalDepsErrors + + // needInternalForTest adds the internal forTest field. + // Tests must also be set on the context for this field to be populated. + needInternalForTest + + // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+. + // Modifies CompiledGoFiles and Types, and has no effect on its own. + typecheckCgo + + // NeedModule adds Module. + NeedModule + + // NeedEmbedFiles adds EmbedFiles. + NeedEmbedFiles + + // NeedEmbedPatterns adds EmbedPatterns. + NeedEmbedPatterns +) + +const ( + // Deprecated: LoadFiles exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles + + // Deprecated: LoadImports exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadImports = LoadFiles | NeedImports + + // Deprecated: LoadTypes exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadTypes = LoadImports | NeedTypes | NeedTypesSizes + + // Deprecated: LoadSyntax exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo + + // Deprecated: LoadAllSyntax exists for historical compatibility + // and should not be used. Please directly specify the needed fields using the Need values. + LoadAllSyntax = LoadSyntax | NeedDeps + + // Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile. + NeedExportsFile = NeedExportFile +) + +// A Config specifies details about how packages should be loaded. +// The zero value is a valid configuration. +// Calls to Load do not modify this struct. +type Config struct { + // Mode controls the level of information returned for each package. + Mode LoadMode + + // Context specifies the context for the load operation. + // If the context is cancelled, the loader may stop early + // and return an ErrCancelled error. + // If Context is nil, the load cannot be cancelled. + Context context.Context + + // Logf is the logger for the config. + // If the user provides a logger, debug logging is enabled. + // If the GOPACKAGESDEBUG environment variable is set to true, + // but the logger is nil, default to log.Printf. 
+ Logf func(format string, args ...interface{}) + + // Dir is the directory in which to run the build system's query tool + // that provides information about the packages. + // If Dir is empty, the tool is run in the current directory. + Dir string + + // Env is the environment to use when invoking the build system's query tool. + // If Env is nil, the current environment is used. + // As in os/exec's Cmd, only the last value in the slice for + // each environment key is used. To specify the setting of only + // a few variables, append to the current environment, as in: + // + // opt.Env = append(os.Environ(), "GOOS=plan9", "GOARCH=386") + // + Env []string + + // gocmdRunner guards go command calls from concurrency errors. + gocmdRunner *gocommand.Runner + + // BuildFlags is a list of command-line flags to be passed through to + // the build system's query tool. + BuildFlags []string + + // modFile will be used for -modfile in go command invocations. + modFile string + + // modFlag will be used for -modfile in go command invocations. + modFlag string + + // Fset provides source position information for syntax trees and types. + // If Fset is nil, Load will use a new fileset, but preserve Fset's value. + Fset *token.FileSet + + // ParseFile is called to read and parse each file + // when preparing a package's type-checked syntax tree. + // It must be safe to call ParseFile simultaneously from multiple goroutines. + // If ParseFile is nil, the loader will uses parser.ParseFile. + // + // ParseFile should parse the source from src and use filename only for + // recording position information. + // + // An application may supply a custom implementation of ParseFile + // to change the effective file contents or the behavior of the parser, + // or to modify the syntax tree. For example, selectively eliminating + // unwanted function bodies can significantly accelerate type checking. + ParseFile func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) + + // If Tests is set, the loader includes not just the packages + // matching a particular pattern but also any related test packages, + // including test-only variants of the package and the test executable. + // + // For example, when using the go command, loading "fmt" with Tests=true + // returns four packages, with IDs "fmt" (the standard package), + // "fmt [fmt.test]" (the package as compiled for the test), + // "fmt_test" (the test functions from source files in package fmt_test), + // and "fmt.test" (the test binary). + // + // In build systems with explicit names for tests, + // setting Tests may have no effect. + Tests bool + + // Overlay provides a mapping of absolute file paths to file contents. + // If the file with the given path already exists, the parser will use the + // alternative file contents provided by the map. + // + // Overlays provide incomplete support for when a given file doesn't + // already exist on disk. See the package doc above for more details. + Overlay map[string][]byte +} + +// driver is the type for functions that query the build system for the +// packages named by the patterns. +type driver func(cfg *Config, patterns ...string) (*driverResponse, error) + +// driverResponse contains the results for a driver query. +type driverResponse struct { + // NotHandled is returned if the request can't be handled by the current + // driver. If an external driver returns a response with NotHandled, the + // rest of the driverResponse is ignored, and go/packages will fallback + // to the next driver. 
If go/packages is extended in the future to support + // lists of multiple drivers, go/packages will fall back to the next driver. + NotHandled bool + + // Sizes, if not nil, is the types.Sizes to use when type checking. + Sizes *types.StdSizes + + // Roots is the set of package IDs that make up the root packages. + // We have to encode this separately because when we encode a single package + // we cannot know if it is one of the roots as that requires knowledge of the + // graph it is part of. + Roots []string `json:",omitempty"` + + // Packages is the full set of packages in the graph. + // The packages are not connected into a graph. + // The Imports if populated will be stubs that only have their ID set. + // Imports will be connected and then type and syntax information added in a + // later pass (see refine). + Packages []*Package + + // GoVersion is the minor version number used by the driver + // (e.g. the go command on the PATH) when selecting .go files. + // Zero means unknown. + GoVersion int +} + +// Load loads and returns the Go packages named by the given patterns. +// +// Config specifies loading options; +// nil behaves the same as an empty Config. +// +// Load returns an error if any of the patterns was invalid +// as defined by the underlying build system. +// It may return an empty list of packages without an error, +// for instance for an empty expansion of a valid wildcard. +// Errors associated with a particular package are recorded in the +// corresponding Package's Errors list, and do not cause Load to +// return an error. Clients may need to handle such errors before +// proceeding with further analysis. The PrintErrors function is +// provided for convenient display of all errors. +func Load(cfg *Config, patterns ...string) ([]*Package, error) { + l := newLoader(cfg) + response, err := defaultDriver(&l.Config, patterns...) + if err != nil { + return nil, err + } + l.sizes = response.Sizes + return l.refine(response) +} + +// defaultDriver is a driver that implements go/packages' fallback behavior. +// It will try to request to an external driver, if one exists. If there's +// no external driver, or the driver returns a response with NotHandled set, +// defaultDriver will fall back to the go list driver. +func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, error) { + driver := findExternalDriver(cfg) + if driver == nil { + driver = goListDriver + } + response, err := driver(cfg, patterns...) + if err != nil { + return response, err + } else if response.NotHandled { + return goListDriver(cfg, patterns...) + } + return response, nil +} + +// A Package describes a loaded Go package. +type Package struct { + // ID is a unique identifier for a package, + // in a syntax provided by the underlying build system. + // + // Because the syntax varies based on the build system, + // clients should treat IDs as opaque and not attempt to + // interpret them. + ID string + + // Name is the package name as it appears in the package source code. + Name string + + // PkgPath is the package path as used by the go/types package. + PkgPath string + + // Errors contains any errors encountered querying the metadata + // of the package, or while parsing or type-checking its files. + Errors []Error + + // TypeErrors contains the subset of errors produced during type checking. + TypeErrors []types.Error + + // GoFiles lists the absolute file paths of the package's Go source files. 
+ GoFiles []string + + // CompiledGoFiles lists the absolute file paths of the package's source + // files that are suitable for type checking. + // This may differ from GoFiles if files are processed before compilation. + CompiledGoFiles []string + + // OtherFiles lists the absolute file paths of the package's non-Go source files, + // including assembly, C, C++, Fortran, Objective-C, SWIG, and so on. + OtherFiles []string + + // EmbedFiles lists the absolute file paths of the package's files + // embedded with go:embed. + EmbedFiles []string + + // EmbedPatterns lists the absolute file patterns of the package's + // files embedded with go:embed. + EmbedPatterns []string + + // IgnoredFiles lists source files that are not part of the package + // using the current build configuration but that might be part of + // the package using other build configurations. + IgnoredFiles []string + + // ExportFile is the absolute path to a file containing type + // information for the package as provided by the build system. + ExportFile string + + // Imports maps import paths appearing in the package's Go source files + // to corresponding loaded Packages. + Imports map[string]*Package + + // Types provides type information for the package. + // The NeedTypes LoadMode bit sets this field for packages matching the + // patterns; type information for dependencies may be missing or incomplete, + // unless NeedDeps and NeedImports are also set. + Types *types.Package + + // Fset provides position information for Types, TypesInfo, and Syntax. + // It is set only when Types is set. + Fset *token.FileSet + + // IllTyped indicates whether the package or any dependency contains errors. + // It is set only when Types is set. + IllTyped bool + + // Syntax is the package's syntax trees, for the files listed in CompiledGoFiles. + // + // The NeedSyntax LoadMode bit populates this field for packages matching the patterns. + // If NeedDeps and NeedImports are also set, this field will also be populated + // for dependencies. + // + // Syntax is kept in the same order as CompiledGoFiles, with the caveat that nils are + // removed. If parsing returned nil, Syntax may be shorter than CompiledGoFiles. + Syntax []*ast.File + + // TypesInfo provides type information about the package's syntax trees. + // It is set only when Syntax is set. + TypesInfo *types.Info + + // TypesSizes provides the effective size function for types in TypesInfo. + TypesSizes types.Sizes + + // forTest is the package under test, if any. + forTest string + + // depsErrors is the DepsErrors field from the go list response, if any. + depsErrors []*packagesinternal.PackageError + + // module is the module information for the package if it exists. + Module *Module +} + +// Module provides module information for a package. +type Module struct { + Path string // module path + Version string // module version + Replace *Module // replaced by this module + Time *time.Time // time version was created + Main bool // is this the main module? + Indirect bool // is this module only an indirect dependency of main module? + Dir string // directory holding files for this module, if any + GoMod string // path to go.mod file used when loading this module, if any + GoVersion string // go version used in module + Error *ModuleError // error loading module +} + +// ModuleError holds errors loading a module. 
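The Config, LoadMode, Load, and Package pieces above form the public surface of go/packages. A minimal sketch of how a caller typically drives them (illustrative only, not part of the vendored file; the "./..." pattern and the chosen mode bits are assumptions):

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		// Request only what is needed; unrequested fields are zeroed on return.
		Mode: packages.NeedName | packages.NeedFiles | packages.NeedImports | packages.NeedTypes,
	}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		log.Fatal(err) // invalid pattern or failed driver invocation
	}
	// Per-package problems land in Package.Errors, not in err.
	if packages.PrintErrors(pkgs) > 0 {
		log.Fatal("packages contain errors")
	}
	for _, pkg := range pkgs {
		fmt.Println(pkg.ID, pkg.PkgPath, len(pkg.GoFiles), "Go files")
	}
}

Because LoadMode has the String method defined in loadmode_string.go above, printing cfg.Mode yields a readable form such as LoadMode(NeedName|NeedFiles|NeedImports|NeedTypes).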
+type ModuleError struct { + Err string // the error itself +} + +func init() { + packagesinternal.GetForTest = func(p interface{}) string { + return p.(*Package).forTest + } + packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError { + return p.(*Package).depsErrors + } + packagesinternal.GetGoCmdRunner = func(config interface{}) *gocommand.Runner { + return config.(*Config).gocmdRunner + } + packagesinternal.SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) { + config.(*Config).gocmdRunner = runner + } + packagesinternal.SetModFile = func(config interface{}, value string) { + config.(*Config).modFile = value + } + packagesinternal.SetModFlag = func(config interface{}, value string) { + config.(*Config).modFlag = value + } + packagesinternal.TypecheckCgo = int(typecheckCgo) + packagesinternal.DepsErrors = int(needInternalDepsErrors) + packagesinternal.ForTest = int(needInternalForTest) +} + +// An Error describes a problem with a package's metadata, syntax, or types. +type Error struct { + Pos string // "file:line:col" or "file:line" or "" or "-" + Msg string + Kind ErrorKind +} + +// ErrorKind describes the source of the error, allowing the user to +// differentiate between errors generated by the driver, the parser, or the +// type-checker. +type ErrorKind int + +const ( + UnknownError ErrorKind = iota + ListError + ParseError + TypeError +) + +func (err Error) Error() string { + pos := err.Pos + if pos == "" { + pos = "-" // like token.Position{}.String() + } + return pos + ": " + err.Msg +} + +// flatPackage is the JSON form of Package +// It drops all the type and syntax fields, and transforms the Imports +// +// TODO(adonovan): identify this struct with Package, effectively +// publishing the JSON protocol. +type flatPackage struct { + ID string + Name string `json:",omitempty"` + PkgPath string `json:",omitempty"` + Errors []Error `json:",omitempty"` + GoFiles []string `json:",omitempty"` + CompiledGoFiles []string `json:",omitempty"` + OtherFiles []string `json:",omitempty"` + EmbedFiles []string `json:",omitempty"` + EmbedPatterns []string `json:",omitempty"` + IgnoredFiles []string `json:",omitempty"` + ExportFile string `json:",omitempty"` + Imports map[string]string `json:",omitempty"` +} + +// MarshalJSON returns the Package in its JSON form. +// For the most part, the structure fields are written out unmodified, and +// the type and syntax fields are skipped. +// The imports are written out as just a map of path to package id. +// The errors are written using a custom type that tries to preserve the +// structure of error types we know about. +// +// This method exists to enable support for additional build systems. It is +// not intended for use by clients of the API and we may change the format. +func (p *Package) MarshalJSON() ([]byte, error) { + flat := &flatPackage{ + ID: p.ID, + Name: p.Name, + PkgPath: p.PkgPath, + Errors: p.Errors, + GoFiles: p.GoFiles, + CompiledGoFiles: p.CompiledGoFiles, + OtherFiles: p.OtherFiles, + EmbedFiles: p.EmbedFiles, + EmbedPatterns: p.EmbedPatterns, + IgnoredFiles: p.IgnoredFiles, + ExportFile: p.ExportFile, + } + if len(p.Imports) > 0 { + flat.Imports = make(map[string]string, len(p.Imports)) + for path, ipkg := range p.Imports { + flat.Imports[path] = ipkg.ID + } + } + return json.Marshal(flat) +} + +// UnmarshalJSON reads in a Package from its JSON format. +// See MarshalJSON for details about the format accepted. 
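For illustration only (not part of the vendored file, and assuming the usual encoding/json import plus a *Package obtained from a prior Load call): because the JSON form is flat, imports survive a round trip only as ID-bearing stubs.

// roundTrip encodes a package to its flat JSON form and decodes it again.
func roundTrip(pkg *packages.Package) (*packages.Package, error) {
	b, err := json.Marshal(pkg) // MarshalJSON: types and syntax dropped, Imports become path -> ID
	if err != nil {
		return nil, err
	}
	out := new(packages.Package)
	if err := json.Unmarshal(b, out); err != nil { // Imports are rebuilt as stubs with only ID set
		return nil, err
	}
	return out, nil
}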
+func (p *Package) UnmarshalJSON(b []byte) error { + flat := &flatPackage{} + if err := json.Unmarshal(b, &flat); err != nil { + return err + } + *p = Package{ + ID: flat.ID, + Name: flat.Name, + PkgPath: flat.PkgPath, + Errors: flat.Errors, + GoFiles: flat.GoFiles, + CompiledGoFiles: flat.CompiledGoFiles, + OtherFiles: flat.OtherFiles, + EmbedFiles: flat.EmbedFiles, + EmbedPatterns: flat.EmbedPatterns, + ExportFile: flat.ExportFile, + } + if len(flat.Imports) > 0 { + p.Imports = make(map[string]*Package, len(flat.Imports)) + for path, id := range flat.Imports { + p.Imports[path] = &Package{ID: id} + } + } + return nil +} + +func (p *Package) String() string { return p.ID } + +// loaderPackage augments Package with state used during the loading phase +type loaderPackage struct { + *Package + importErrors map[string]error // maps each bad import to its error + loadOnce sync.Once + color uint8 // for cycle detection + needsrc bool // load from source (Mode >= LoadTypes) + needtypes bool // type information is either requested or depended on + initial bool // package was matched by a pattern + goVersion int // minor version number of go command on PATH +} + +// loader holds the working state of a single call to load. +type loader struct { + pkgs map[string]*loaderPackage + Config + sizes types.Sizes + parseCache map[string]*parseValue + parseCacheMu sync.Mutex + exportMu sync.Mutex // enforces mutual exclusion of exportdata operations + + // Config.Mode contains the implied mode (see impliedLoadMode). + // Implied mode contains all the fields we need the data for. + // In requestedMode there are the actually requested fields. + // We'll zero them out before returning packages to the user. + // This makes it easier for us to get the conditions where + // we need certain modes right. + requestedMode LoadMode +} + +type parseValue struct { + f *ast.File + err error + ready chan struct{} +} + +func newLoader(cfg *Config) *loader { + ld := &loader{ + parseCache: map[string]*parseValue{}, + } + if cfg != nil { + ld.Config = *cfg + // If the user has provided a logger, use it. + ld.Config.Logf = cfg.Logf + } + if ld.Config.Logf == nil { + // If the GOPACKAGESDEBUG environment variable is set to true, + // but the user has not provided a logger, default to log.Printf. + if debug { + ld.Config.Logf = log.Printf + } else { + ld.Config.Logf = func(format string, args ...interface{}) {} + } + } + if ld.Config.Mode == 0 { + ld.Config.Mode = NeedName | NeedFiles | NeedCompiledGoFiles // Preserve zero behavior of Mode for backwards compatibility. + } + if ld.Config.Env == nil { + ld.Config.Env = os.Environ() + } + if ld.Config.gocmdRunner == nil { + ld.Config.gocmdRunner = &gocommand.Runner{} + } + if ld.Context == nil { + ld.Context = context.Background() + } + if ld.Dir == "" { + if dir, err := os.Getwd(); err == nil { + ld.Dir = dir + } + } + + // Save the actually requested fields. We'll zero them out before returning packages to the user. + ld.requestedMode = ld.Mode + ld.Mode = impliedLoadMode(ld.Mode) + + if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { + if ld.Fset == nil { + ld.Fset = token.NewFileSet() + } + + // ParseFile is required even in LoadTypes mode + // because we load source if export data is missing. 
+ if ld.ParseFile == nil { + ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) { + const mode = parser.AllErrors | parser.ParseComments + return parser.ParseFile(fset, filename, src, mode) + } + } + } + + return ld +} + +// refine connects the supplied packages into a graph and then adds type and +// and syntax information as requested by the LoadMode. +func (ld *loader) refine(response *driverResponse) ([]*Package, error) { + roots := response.Roots + rootMap := make(map[string]int, len(roots)) + for i, root := range roots { + rootMap[root] = i + } + ld.pkgs = make(map[string]*loaderPackage) + // first pass, fixup and build the map and roots + var initial = make([]*loaderPackage, len(roots)) + for _, pkg := range response.Packages { + rootIndex := -1 + if i, found := rootMap[pkg.ID]; found { + rootIndex = i + } + + // Overlays can invalidate export data. + // TODO(matloob): make this check fine-grained based on dependencies on overlaid files + exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe" + // This package needs type information if the caller requested types and the package is + // either a root, or it's a non-root and the user requested dependencies ... + needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) + // This package needs source if the call requested source (or types info, which implies source) + // and the package is either a root, or itas a non- root and the user requested dependencies... + needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) || + // ... or if we need types and the exportData is invalid. We fall back to (incompletely) + // typechecking packages from source if they fail to compile. + (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && exportDataInvalid)) && pkg.PkgPath != "unsafe" + lpkg := &loaderPackage{ + Package: pkg, + needtypes: needtypes, + needsrc: needsrc, + goVersion: response.GoVersion, + } + ld.pkgs[lpkg.ID] = lpkg + if rootIndex >= 0 { + initial[rootIndex] = lpkg + lpkg.initial = true + } + } + for i, root := range roots { + if initial[i] == nil { + return nil, fmt.Errorf("root package %v is missing", root) + } + } + + // Materialize the import graph. + + const ( + white = 0 // new + grey = 1 // in progress + black = 2 // complete + ) + + // visit traverses the import graph, depth-first, + // and materializes the graph as Packages.Imports. + // + // Valid imports are saved in the Packages.Import map. + // Invalid imports (cycles and missing nodes) are saved in the importErrors map. + // Thus, even in the presence of both kinds of errors, the Import graph remains a DAG. + // + // visit returns whether the package needs src or has a transitive + // dependency on a package that does. These are the only packages + // for which we load source code. + var stack []*loaderPackage + var visit func(lpkg *loaderPackage) bool + var srcPkgs []*loaderPackage + visit = func(lpkg *loaderPackage) bool { + switch lpkg.color { + case black: + return lpkg.needsrc + case grey: + panic("internal error: grey node") + } + lpkg.color = grey + stack = append(stack, lpkg) // push + stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports + // If NeedImports isn't set, the imports fields will all be zeroed out. 
+ if ld.Mode&NeedImports != 0 { + lpkg.Imports = make(map[string]*Package, len(stubs)) + for importPath, ipkg := range stubs { + var importErr error + imp := ld.pkgs[ipkg.ID] + if imp == nil { + // (includes package "C" when DisableCgo) + importErr = fmt.Errorf("missing package: %q", ipkg.ID) + } else if imp.color == grey { + importErr = fmt.Errorf("import cycle: %s", stack) + } + if importErr != nil { + if lpkg.importErrors == nil { + lpkg.importErrors = make(map[string]error) + } + lpkg.importErrors[importPath] = importErr + continue + } + + if visit(imp) { + lpkg.needsrc = true + } + lpkg.Imports[importPath] = imp.Package + } + } + if lpkg.needsrc { + srcPkgs = append(srcPkgs, lpkg) + } + if ld.Mode&NeedTypesSizes != 0 { + lpkg.TypesSizes = ld.sizes + } + stack = stack[:len(stack)-1] // pop + lpkg.color = black + + return lpkg.needsrc + } + + if ld.Mode&NeedImports == 0 { + // We do this to drop the stub import packages that we are not even going to try to resolve. + for _, lpkg := range initial { + lpkg.Imports = nil + } + } else { + // For each initial package, create its import DAG. + for _, lpkg := range initial { + visit(lpkg) + } + } + if ld.Mode&NeedImports != 0 && ld.Mode&NeedTypes != 0 { + for _, lpkg := range srcPkgs { + // Complete type information is required for the + // immediate dependencies of each source package. + for _, ipkg := range lpkg.Imports { + imp := ld.pkgs[ipkg.ID] + imp.needtypes = true + } + } + } + // Load type data and syntax if needed, starting at + // the initial packages (roots of the import DAG). + if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 { + var wg sync.WaitGroup + for _, lpkg := range initial { + wg.Add(1) + go func(lpkg *loaderPackage) { + ld.loadRecursive(lpkg) + wg.Done() + }(lpkg) + } + wg.Wait() + } + + result := make([]*Package, len(initial)) + for i, lpkg := range initial { + result[i] = lpkg.Package + } + for i := range ld.pkgs { + // Clear all unrequested fields, + // to catch programs that use more than they request. + if ld.requestedMode&NeedName == 0 { + ld.pkgs[i].Name = "" + ld.pkgs[i].PkgPath = "" + } + if ld.requestedMode&NeedFiles == 0 { + ld.pkgs[i].GoFiles = nil + ld.pkgs[i].OtherFiles = nil + ld.pkgs[i].IgnoredFiles = nil + } + if ld.requestedMode&NeedEmbedFiles == 0 { + ld.pkgs[i].EmbedFiles = nil + } + if ld.requestedMode&NeedEmbedPatterns == 0 { + ld.pkgs[i].EmbedPatterns = nil + } + if ld.requestedMode&NeedCompiledGoFiles == 0 { + ld.pkgs[i].CompiledGoFiles = nil + } + if ld.requestedMode&NeedImports == 0 { + ld.pkgs[i].Imports = nil + } + if ld.requestedMode&NeedExportFile == 0 { + ld.pkgs[i].ExportFile = "" + } + if ld.requestedMode&NeedTypes == 0 { + ld.pkgs[i].Types = nil + ld.pkgs[i].Fset = nil + ld.pkgs[i].IllTyped = false + } + if ld.requestedMode&NeedSyntax == 0 { + ld.pkgs[i].Syntax = nil + } + if ld.requestedMode&NeedTypesInfo == 0 { + ld.pkgs[i].TypesInfo = nil + } + if ld.requestedMode&NeedTypesSizes == 0 { + ld.pkgs[i].TypesSizes = nil + } + if ld.requestedMode&NeedModule == 0 { + ld.pkgs[i].Module = nil + } + } + + return result, nil +} + +// loadRecursive loads the specified package and its dependencies, +// recursively, in parallel, in topological order. +// It is atomic and idempotent. +// Precondition: ld.Mode&NeedTypes. +func (ld *loader) loadRecursive(lpkg *loaderPackage) { + lpkg.loadOnce.Do(func() { + // Load the direct dependencies, in parallel. 
+ var wg sync.WaitGroup + for _, ipkg := range lpkg.Imports { + imp := ld.pkgs[ipkg.ID] + wg.Add(1) + go func(imp *loaderPackage) { + ld.loadRecursive(imp) + wg.Done() + }(imp) + } + wg.Wait() + ld.loadPackage(lpkg) + }) +} + +// loadPackage loads the specified package. +// It must be called only once per Package, +// after immediate dependencies are loaded. +// Precondition: ld.Mode & NeedTypes. +func (ld *loader) loadPackage(lpkg *loaderPackage) { + if lpkg.PkgPath == "unsafe" { + // Fill in the blanks to avoid surprises. + lpkg.Types = types.Unsafe + lpkg.Fset = ld.Fset + lpkg.Syntax = []*ast.File{} + lpkg.TypesInfo = new(types.Info) + lpkg.TypesSizes = ld.sizes + return + } + + // Call NewPackage directly with explicit name. + // This avoids skew between golist and go/types when the files' + // package declarations are inconsistent. + lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name) + lpkg.Fset = ld.Fset + + // Subtle: we populate all Types fields with an empty Package + // before loading export data so that export data processing + // never has to create a types.Package for an indirect dependency, + // which would then require that such created packages be explicitly + // inserted back into the Import graph as a final step after export data loading. + // The Diamond test exercises this case. + if !lpkg.needtypes && !lpkg.needsrc { + return + } + if !lpkg.needsrc { + ld.loadFromExportData(lpkg) + return // not a source package, don't get syntax trees + } + + appendError := func(err error) { + // Convert various error types into the one true Error. + var errs []Error + switch err := err.(type) { + case Error: + // from driver + errs = append(errs, err) + + case *os.PathError: + // from parser + errs = append(errs, Error{ + Pos: err.Path + ":1", + Msg: err.Err.Error(), + Kind: ParseError, + }) + + case scanner.ErrorList: + // from parser + for _, err := range err { + errs = append(errs, Error{ + Pos: err.Pos.String(), + Msg: err.Msg, + Kind: ParseError, + }) + } + + case types.Error: + // from type checker + lpkg.TypeErrors = append(lpkg.TypeErrors, err) + errs = append(errs, Error{ + Pos: err.Fset.Position(err.Pos).String(), + Msg: err.Msg, + Kind: TypeError, + }) + + default: + // unexpected impoverished error from parser? + errs = append(errs, Error{ + Pos: "-", + Msg: err.Error(), + Kind: UnknownError, + }) + + // If you see this error message, please file a bug. + log.Printf("internal error: error %q (%T) without position", err, err) + } + + lpkg.Errors = append(lpkg.Errors, errs...) + } + + // If the go command on the PATH is newer than the runtime, + // then the go/{scanner,ast,parser,types} packages from the + // standard library may be unable to process the files + // selected by go list. + // + // There is currently no way to downgrade the effective + // version of the go command (see issue 52078), so we proceed + // with the newer go command but, in case of parse or type + // errors, we emit an additional diagnostic. + // + // See: + // - golang.org/issue/52078 (flag to set release tags) + // - golang.org/issue/50825 (gopls legacy version support) + // - golang.org/issue/55883 (go/packages confusing error) + // + // Should we assert a hard minimum of (currently) go1.16 here? 
+ var runtimeVersion int + if _, err := fmt.Sscanf(runtime.Version(), "go1.%d", &runtimeVersion); err == nil && runtimeVersion < lpkg.goVersion { + defer func() { + if len(lpkg.Errors) > 0 { + appendError(Error{ + Pos: "-", + Msg: fmt.Sprintf("This application uses version go1.%d of the source-processing packages but runs version go1.%d of 'go list'. It may fail to process source files that rely on newer language features. If so, rebuild the application using a newer version of Go.", runtimeVersion, lpkg.goVersion), + Kind: UnknownError, + }) + } + }() + } + + if ld.Config.Mode&NeedTypes != 0 && len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" { + // The config requested loading sources and types, but sources are missing. + // Add an error to the package and fall back to loading from export data. + appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError}) + ld.loadFromExportData(lpkg) + return // can't get syntax trees for this package + } + + files, errs := ld.parseFiles(lpkg.CompiledGoFiles) + for _, err := range errs { + appendError(err) + } + + lpkg.Syntax = files + if ld.Config.Mode&NeedTypes == 0 { + return + } + + lpkg.TypesInfo = &types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + Implicits: make(map[ast.Node]types.Object), + Scopes: make(map[ast.Node]*types.Scope), + Selections: make(map[*ast.SelectorExpr]*types.Selection), + } + typeparams.InitInstanceInfo(lpkg.TypesInfo) + lpkg.TypesSizes = ld.sizes + + importer := importerFunc(func(path string) (*types.Package, error) { + if path == "unsafe" { + return types.Unsafe, nil + } + + // The imports map is keyed by import path. + ipkg := lpkg.Imports[path] + if ipkg == nil { + if err := lpkg.importErrors[path]; err != nil { + return nil, err + } + // There was skew between the metadata and the + // import declarations, likely due to an edit + // race, or because the ParseFile feature was + // used to supply alternative file contents. + return nil, fmt.Errorf("no metadata for %s", path) + } + + if ipkg.Types != nil && ipkg.Types.Complete() { + return ipkg.Types, nil + } + log.Fatalf("internal error: package %q without types was imported from %q", path, lpkg) + panic("unreachable") + }) + + // type-check + tc := &types.Config{ + Importer: importer, + + // Type-check bodies of functions only in initial packages. + // Example: for import graph A->B->C and initial packages {A,C}, + // we can ignore function bodies in B. + IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial, + + Error: appendError, + Sizes: ld.sizes, + } + if (ld.Mode & typecheckCgo) != 0 { + if !typesinternal.SetUsesCgo(tc) { + appendError(Error{ + Msg: "typecheckCgo requires Go 1.15+", + Kind: ListError, + }) + return + } + } + types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) + + lpkg.importErrors = nil // no longer needed + + // If !Cgo, the type-checker uses FakeImportC mode, so + // it doesn't invoke the importer for import "C", + // nor report an error for the import, + // or for any undefined C.f reference. + // We must detect this explicitly and correctly + // mark the package as IllTyped (by reporting an error). + // TODO(adonovan): if these errors are annoying, + // we could just set IllTyped quietly. 
+ if tc.FakeImportC { + outer: + for _, f := range lpkg.Syntax { + for _, imp := range f.Imports { + if imp.Path.Value == `"C"` { + err := types.Error{Fset: ld.Fset, Pos: imp.Pos(), Msg: `import "C" ignored`} + appendError(err) + break outer + } + } + } + } + + // Record accumulated errors. + illTyped := len(lpkg.Errors) > 0 + if !illTyped { + for _, imp := range lpkg.Imports { + if imp.IllTyped { + illTyped = true + break + } + } + } + lpkg.IllTyped = illTyped +} + +// An importFunc is an implementation of the single-method +// types.Importer interface based on a function value. +type importerFunc func(path string) (*types.Package, error) + +func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) } + +// We use a counting semaphore to limit +// the number of parallel I/O calls per process. +var ioLimit = make(chan bool, 20) + +func (ld *loader) parseFile(filename string) (*ast.File, error) { + ld.parseCacheMu.Lock() + v, ok := ld.parseCache[filename] + if ok { + // cache hit + ld.parseCacheMu.Unlock() + <-v.ready + } else { + // cache miss + v = &parseValue{ready: make(chan struct{})} + ld.parseCache[filename] = v + ld.parseCacheMu.Unlock() + + var src []byte + for f, contents := range ld.Config.Overlay { + if sameFile(f, filename) { + src = contents + } + } + var err error + if src == nil { + ioLimit <- true // wait + src, err = ioutil.ReadFile(filename) + <-ioLimit // signal + } + if err != nil { + v.err = err + } else { + v.f, v.err = ld.ParseFile(ld.Fset, filename, src) + } + + close(v.ready) + } + return v.f, v.err +} + +// parseFiles reads and parses the Go source files and returns the ASTs +// of the ones that could be at least partially parsed, along with a +// list of I/O and parse errors encountered. +// +// Because files are scanned in parallel, the token.Pos +// positions of the resulting ast.Files are not ordered. +func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) { + var wg sync.WaitGroup + n := len(filenames) + parsed := make([]*ast.File, n) + errors := make([]error, n) + for i, file := range filenames { + if ld.Config.Context.Err() != nil { + parsed[i] = nil + errors[i] = ld.Config.Context.Err() + continue + } + wg.Add(1) + go func(i int, filename string) { + parsed[i], errors[i] = ld.parseFile(filename) + wg.Done() + }(i, file) + } + wg.Wait() + + // Eliminate nils, preserving order. + var o int + for _, f := range parsed { + if f != nil { + parsed[o] = f + o++ + } + } + parsed = parsed[:o] + + o = 0 + for _, err := range errors { + if err != nil { + errors[o] = err + o++ + } + } + errors = errors[:o] + + return parsed, errors +} + +// sameFile returns true if x and y have the same basename and denote +// the same file. +func sameFile(x, y string) bool { + if x == y { + // It could be the case that y doesn't exist. + // For instance, it may be an overlay file that + // hasn't been written to disk. To handle that case + // let x == y through. (We added the exact absolute path + // string to the CompiledGoFiles list, so the unwritten + // overlay case implies x==y.) + return true + } + if strings.EqualFold(filepath.Base(x), filepath.Base(y)) { // (optimisation) + if xi, err := os.Stat(x); err == nil { + if yi, err := os.Stat(y); err == nil { + return os.SameFile(xi, yi) + } + } + } + return false +} + +// loadFromExportData returns type information for the specified +// package, loading it from an export data file on the first request. 
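A hedged sketch of the behaviour this function backs (illustrative, not part of the vendored file; the "fmt" pattern and the fmt/log/go/packages imports are assumed): when only type information is requested and syntax is not, packages are satisfied from export data rather than re-type-checked from source.

cfg := &packages.Config{
	// NeedTypes without NeedSyntax or NeedTypesInfo lets packages be
	// filled in from export data via loadFromExportData where available.
	Mode: packages.NeedName | packages.NeedImports | packages.NeedTypes,
}
pkgs, err := packages.Load(cfg, "fmt")
if err != nil || packages.PrintErrors(pkgs) > 0 {
	log.Fatal("load failed")
}
fmt.Println(pkgs[0].Types.Scope().Names()) // names in fmt's package scope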
+func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error) { + if lpkg.PkgPath == "" { + log.Fatalf("internal error: Package %s has no PkgPath", lpkg) + } + + // Because gcexportdata.Read has the potential to create or + // modify the types.Package for each node in the transitive + // closure of dependencies of lpkg, all exportdata operations + // must be sequential. (Finer-grained locking would require + // changes to the gcexportdata API.) + // + // The exportMu lock guards the Package.Pkg field and the + // types.Package it points to, for each Package in the graph. + // + // Not all accesses to Package.Pkg need to be protected by exportMu: + // graph ordering ensures that direct dependencies of source + // packages are fully loaded before the importer reads their Pkg field. + ld.exportMu.Lock() + defer ld.exportMu.Unlock() + + if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() { + return tpkg, nil // cache hit + } + + lpkg.IllTyped = true // fail safe + + if lpkg.ExportFile == "" { + // Errors while building export data will have been printed to stderr. + return nil, fmt.Errorf("no export data file") + } + f, err := os.Open(lpkg.ExportFile) + if err != nil { + return nil, err + } + defer f.Close() + + // Read gc export data. + // + // We don't currently support gccgo export data because all + // underlying workspaces use the gc toolchain. (Even build + // systems that support gccgo don't use it for workspace + // queries.) + r, err := gcexportdata.NewReader(f) + if err != nil { + return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + } + + // Build the view. + // + // The gcexportdata machinery has no concept of package ID. + // It identifies packages by their PkgPath, which although not + // globally unique is unique within the scope of one invocation + // of the linker, type-checker, or gcexportdata. + // + // So, we must build a PkgPath-keyed view of the global + // (conceptually ID-keyed) cache of packages and pass it to + // gcexportdata. The view must contain every existing + // package that might possibly be mentioned by the + // current package---its transitive closure. + // + // In loadPackage, we unconditionally create a types.Package for + // each dependency so that export data loading does not + // create new ones. + // + // TODO(adonovan): it would be simpler and more efficient + // if the export data machinery invoked a callback to + // get-or-create a package instead of a map. + // + view := make(map[string]*types.Package) // view seen by gcexportdata + seen := make(map[*loaderPackage]bool) // all visited packages + var visit func(pkgs map[string]*Package) + visit = func(pkgs map[string]*Package) { + for _, p := range pkgs { + lpkg := ld.pkgs[p.ID] + if !seen[lpkg] { + seen[lpkg] = true + view[lpkg.PkgPath] = lpkg.Types + visit(lpkg.Imports) + } + } + } + visit(lpkg.Imports) + + viewLen := len(view) + 1 // adding the self package + // Parse the export data. + // (May modify incomplete packages in view but not create new ones.) + tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath) + if err != nil { + return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err) + } + if _, ok := view["go.shape"]; ok { + // Account for the pseudopackage "go.shape" that gets + // created by generic code. 
+ viewLen++ + } + if viewLen != len(view) { + log.Panicf("golang.org/x/tools/go/packages: unexpected new packages during load of %s", lpkg.PkgPath) + } + + lpkg.Types = tpkg + lpkg.IllTyped = false + + return tpkg, nil +} + +// impliedLoadMode returns loadMode with its dependencies. +func impliedLoadMode(loadMode LoadMode) LoadMode { + if loadMode&(NeedDeps|NeedTypes|NeedTypesInfo) != 0 { + // All these things require knowing the import graph. + loadMode |= NeedImports + } + + return loadMode +} + +func usesExportData(cfg *Config) bool { + return cfg.Mode&NeedExportFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0 +} + +var _ interface{} = io.Discard // assert build toolchain is go1.16 or later diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go new file mode 100644 index 0000000000000..a1dcc40b7270d --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/visit.go @@ -0,0 +1,59 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "fmt" + "os" + "sort" +) + +// Visit visits all the packages in the import graph whose roots are +// pkgs, calling the optional pre function the first time each package +// is encountered (preorder), and the optional post function after a +// package's dependencies have been visited (postorder). +// The boolean result of pre(pkg) determines whether +// the imports of package pkg are visited. +func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { + seen := make(map[*Package]bool) + var visit func(*Package) + visit = func(pkg *Package) { + if !seen[pkg] { + seen[pkg] = true + + if pre == nil || pre(pkg) { + paths := make([]string, 0, len(pkg.Imports)) + for path := range pkg.Imports { + paths = append(paths, path) + } + sort.Strings(paths) // Imports is a map, this makes visit stable + for _, path := range paths { + visit(pkg.Imports[path]) + } + } + + if post != nil { + post(pkg) + } + } + } + for _, pkg := range pkgs { + visit(pkg) + } +} + +// PrintErrors prints to os.Stderr the accumulated errors of all +// packages in the import graph rooted at pkgs, dependencies first. +// PrintErrors returns the number of errors printed. +func PrintErrors(pkgs []*Package) int { + var n int + Visit(pkgs, nil, func(pkg *Package) { + for _, err := range pkg.Errors { + fmt.Fprintln(os.Stderr, err) + n++ + } + }) + return n +} diff --git a/vendor/golang.org/x/tools/internal/event/core/event.go b/vendor/golang.org/x/tools/internal/event/core/event.go new file mode 100644 index 0000000000000..a6cf0e64a4b17 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/core/event.go @@ -0,0 +1,85 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package core provides support for event based telemetry. +package core + +import ( + "fmt" + "time" + + "golang.org/x/tools/internal/event/label" +) + +// Event holds the information about an event of note that occurred. +type Event struct { + at time.Time + + // As events are often on the stack, storing the first few labels directly + // in the event can avoid an allocation at all for the very common cases of + // simple events. + // The length needs to be large enough to cope with the majority of events + // but no so large as to cause undue stack pressure. 
+ // A log message with two values will use 3 labels (one for each value and + // one for the message itself). + + static [3]label.Label // inline storage for the first few labels + dynamic []label.Label // dynamically sized storage for remaining labels +} + +// eventLabelMap implements label.Map for a the labels of an Event. +type eventLabelMap struct { + event Event +} + +func (ev Event) At() time.Time { return ev.at } + +func (ev Event) Format(f fmt.State, r rune) { + if !ev.at.IsZero() { + fmt.Fprint(f, ev.at.Format("2006/01/02 15:04:05 ")) + } + for index := 0; ev.Valid(index); index++ { + if l := ev.Label(index); l.Valid() { + fmt.Fprintf(f, "\n\t%v", l) + } + } +} + +func (ev Event) Valid(index int) bool { + return index >= 0 && index < len(ev.static)+len(ev.dynamic) +} + +func (ev Event) Label(index int) label.Label { + if index < len(ev.static) { + return ev.static[index] + } + return ev.dynamic[index-len(ev.static)] +} + +func (ev Event) Find(key label.Key) label.Label { + for _, l := range ev.static { + if l.Key() == key { + return l + } + } + for _, l := range ev.dynamic { + if l.Key() == key { + return l + } + } + return label.Label{} +} + +func MakeEvent(static [3]label.Label, labels []label.Label) Event { + return Event{ + static: static, + dynamic: labels, + } +} + +// CloneEvent event returns a copy of the event with the time adjusted to at. +func CloneEvent(ev Event, at time.Time) Event { + ev.at = at + return ev +} diff --git a/vendor/golang.org/x/tools/internal/event/core/export.go b/vendor/golang.org/x/tools/internal/event/core/export.go new file mode 100644 index 0000000000000..05f3a9a5791a9 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/core/export.go @@ -0,0 +1,70 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package core + +import ( + "context" + "sync/atomic" + "time" + "unsafe" + + "golang.org/x/tools/internal/event/label" +) + +// Exporter is a function that handles events. +// It may return a modified context and event. +type Exporter func(context.Context, Event, label.Map) context.Context + +var ( + exporter unsafe.Pointer +) + +// SetExporter sets the global exporter function that handles all events. +// The exporter is called synchronously from the event call site, so it should +// return quickly so as not to hold up user code. +func SetExporter(e Exporter) { + p := unsafe.Pointer(&e) + if e == nil { + // &e is always valid, and so p is always valid, but for the early abort + // of ProcessEvent to be efficient it needs to make the nil check on the + // pointer without having to dereference it, so we make the nil function + // also a nil pointer + p = nil + } + atomic.StorePointer(&exporter, p) +} + +// deliver is called to deliver an event to the supplied exporter. +// it will fill in the time. +func deliver(ctx context.Context, exporter Exporter, ev Event) context.Context { + // add the current time to the event + ev.at = time.Now() + // hand the event off to the current exporter + return exporter(ctx, ev, ev) +} + +// Export is called to deliver an event to the global exporter if set. 
+func Export(ctx context.Context, ev Event) context.Context { + // get the global exporter and abort early if there is not one + exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter)) + if exporterPtr == nil { + return ctx + } + return deliver(ctx, *exporterPtr, ev) +} + +// ExportPair is called to deliver a start event to the supplied exporter. +// It also returns a function that will deliver the end event to the same +// exporter. +// It will fill in the time. +func ExportPair(ctx context.Context, begin, end Event) (context.Context, func()) { + // get the global exporter and abort early if there is not one + exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter)) + if exporterPtr == nil { + return ctx, func() {} + } + ctx = deliver(ctx, *exporterPtr, begin) + return ctx, func() { deliver(ctx, *exporterPtr, end) } +} diff --git a/vendor/golang.org/x/tools/internal/event/core/fast.go b/vendor/golang.org/x/tools/internal/event/core/fast.go new file mode 100644 index 0000000000000..06c1d4615e6b2 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/core/fast.go @@ -0,0 +1,77 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package core + +import ( + "context" + + "golang.org/x/tools/internal/event/keys" + "golang.org/x/tools/internal/event/label" +) + +// Log1 takes a message and one label delivers a log event to the exporter. +// It is a customized version of Print that is faster and does no allocation. +func Log1(ctx context.Context, message string, t1 label.Label) { + Export(ctx, MakeEvent([3]label.Label{ + keys.Msg.Of(message), + t1, + }, nil)) +} + +// Log2 takes a message and two labels and delivers a log event to the exporter. +// It is a customized version of Print that is faster and does no allocation. +func Log2(ctx context.Context, message string, t1 label.Label, t2 label.Label) { + Export(ctx, MakeEvent([3]label.Label{ + keys.Msg.Of(message), + t1, + t2, + }, nil)) +} + +// Metric1 sends a label event to the exporter with the supplied labels. +func Metric1(ctx context.Context, t1 label.Label) context.Context { + return Export(ctx, MakeEvent([3]label.Label{ + keys.Metric.New(), + t1, + }, nil)) +} + +// Metric2 sends a label event to the exporter with the supplied labels. +func Metric2(ctx context.Context, t1, t2 label.Label) context.Context { + return Export(ctx, MakeEvent([3]label.Label{ + keys.Metric.New(), + t1, + t2, + }, nil)) +} + +// Start1 sends a span start event with the supplied label list to the exporter. +// It also returns a function that will end the span, which should normally be +// deferred. +func Start1(ctx context.Context, name string, t1 label.Label) (context.Context, func()) { + return ExportPair(ctx, + MakeEvent([3]label.Label{ + keys.Start.Of(name), + t1, + }, nil), + MakeEvent([3]label.Label{ + keys.End.New(), + }, nil)) +} + +// Start2 sends a span start event with the supplied label list to the exporter. +// It also returns a function that will end the span, which should normally be +// deferred. 
+func Start2(ctx context.Context, name string, t1, t2 label.Label) (context.Context, func()) { + return ExportPair(ctx, + MakeEvent([3]label.Label{ + keys.Start.Of(name), + t1, + t2, + }, nil), + MakeEvent([3]label.Label{ + keys.End.New(), + }, nil)) +} diff --git a/vendor/golang.org/x/tools/internal/event/doc.go b/vendor/golang.org/x/tools/internal/event/doc.go new file mode 100644 index 0000000000000..5dc6e6babedd8 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/doc.go @@ -0,0 +1,7 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package event provides a set of packages that cover the main +// concepts of telemetry in an implementation agnostic way. +package event diff --git a/vendor/golang.org/x/tools/internal/event/event.go b/vendor/golang.org/x/tools/internal/event/event.go new file mode 100644 index 0000000000000..4d55e577d1a80 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/event.go @@ -0,0 +1,127 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package event + +import ( + "context" + + "golang.org/x/tools/internal/event/core" + "golang.org/x/tools/internal/event/keys" + "golang.org/x/tools/internal/event/label" +) + +// Exporter is a function that handles events. +// It may return a modified context and event. +type Exporter func(context.Context, core.Event, label.Map) context.Context + +// SetExporter sets the global exporter function that handles all events. +// The exporter is called synchronously from the event call site, so it should +// return quickly so as not to hold up user code. +func SetExporter(e Exporter) { + core.SetExporter(core.Exporter(e)) +} + +// Log takes a message and a label list and combines them into a single event +// before delivering them to the exporter. +func Log(ctx context.Context, message string, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Msg.Of(message), + }, labels)) +} + +// IsLog returns true if the event was built by the Log function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsLog(ev core.Event) bool { + return ev.Label(0).Key() == keys.Msg +} + +// Error takes a message and a label list and combines them into a single event +// before delivering them to the exporter. It captures the error in the +// delivered event. +func Error(ctx context.Context, message string, err error, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Msg.Of(message), + keys.Err.Of(err), + }, labels)) +} + +// IsError returns true if the event was built by the Error function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsError(ev core.Event) bool { + return ev.Label(0).Key() == keys.Msg && + ev.Label(1).Key() == keys.Err +} + +// Metric sends a label event to the exporter with the supplied labels. +func Metric(ctx context.Context, labels ...label.Label) { + core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Metric.New(), + }, labels)) +} + +// IsMetric returns true if the event was built by the Metric function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. 
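A minimal sketch of how the exporter, event, and key pieces fit together (illustrative only; these packages are internal to x/tools, so the sketch is meaningful only for code inside that module, and the key name is an assumption):

package demo

import (
	"context"
	"fmt"

	"golang.org/x/tools/internal/event"
	"golang.org/x/tools/internal/event/core"
	"golang.org/x/tools/internal/event/keys"
	"golang.org/x/tools/internal/event/label"
)

var countKey = keys.NewInt("count", "number of items processed")

func init() {
	// The exporter runs synchronously at every event call site; keep it cheap.
	event.SetExporter(func(ctx context.Context, ev core.Event, lm label.Map) context.Context {
		if event.IsLog(ev) || event.IsError(ev) {
			fmt.Printf("%v\n", ev) // Event implements fmt.Formatter
		}
		return ctx
	})
}

func process(ctx context.Context, n int) {
	ctx, done := event.Start(ctx, "process")
	defer done()
	event.Log(ctx, "processed items", countKey.Of(n))
}

Where the allocation of the label slice matters, the Log1, Log2, Metric1, and Start1 helpers in core/fast.go above take a fixed number of labels instead.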
+func IsMetric(ev core.Event) bool { + return ev.Label(0).Key() == keys.Metric +} + +// Label sends a label event to the exporter with the supplied labels. +func Label(ctx context.Context, labels ...label.Label) context.Context { + return core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Label.New(), + }, labels)) +} + +// IsLabel returns true if the event was built by the Label function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsLabel(ev core.Event) bool { + return ev.Label(0).Key() == keys.Label +} + +// Start sends a span start event with the supplied label list to the exporter. +// It also returns a function that will end the span, which should normally be +// deferred. +func Start(ctx context.Context, name string, labels ...label.Label) (context.Context, func()) { + return core.ExportPair(ctx, + core.MakeEvent([3]label.Label{ + keys.Start.Of(name), + }, labels), + core.MakeEvent([3]label.Label{ + keys.End.New(), + }, nil)) +} + +// IsStart returns true if the event was built by the Start function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsStart(ev core.Event) bool { + return ev.Label(0).Key() == keys.Start +} + +// IsEnd returns true if the event was built by the End function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsEnd(ev core.Event) bool { + return ev.Label(0).Key() == keys.End +} + +// Detach returns a context without an associated span. +// This allows the creation of spans that are not children of the current span. +func Detach(ctx context.Context) context.Context { + return core.Export(ctx, core.MakeEvent([3]label.Label{ + keys.Detach.New(), + }, nil)) +} + +// IsDetach returns true if the event was built by the Detach function. +// It is intended to be used in exporters to identify the semantics of the +// event when deciding what to do with it. +func IsDetach(ev core.Event) bool { + return ev.Label(0).Key() == keys.Detach +} diff --git a/vendor/golang.org/x/tools/internal/event/keys/keys.go b/vendor/golang.org/x/tools/internal/event/keys/keys.go new file mode 100644 index 0000000000000..a02206e30150d --- /dev/null +++ b/vendor/golang.org/x/tools/internal/event/keys/keys.go @@ -0,0 +1,564 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package keys + +import ( + "fmt" + "io" + "math" + "strconv" + + "golang.org/x/tools/internal/event/label" +) + +// Value represents a key for untyped values. +type Value struct { + name string + description string +} + +// New creates a new Key for untyped values. +func New(name, description string) *Value { + return &Value{name: name, description: description} +} + +func (k *Value) Name() string { return k.name } +func (k *Value) Description() string { return k.description } + +func (k *Value) Format(w io.Writer, buf []byte, l label.Label) { + fmt.Fprint(w, k.From(l)) +} + +// Get can be used to get a label for the key from a label.Map. +func (k *Value) Get(lm label.Map) interface{} { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return nil +} + +// From can be used to get a value from a Label. +func (k *Value) From(t label.Label) interface{} { return t.UnpackValue() } + +// Of creates a new Label with this key and the supplied value. 
+func (k *Value) Of(value interface{}) label.Label { return label.OfValue(k, value) } + +// Tag represents a key for tagging labels that have no value. +// These are used when the existence of the label is the entire information it +// carries, such as marking events to be of a specific kind, or from a specific +// package. +type Tag struct { + name string + description string +} + +// NewTag creates a new Key for tagging labels. +func NewTag(name, description string) *Tag { + return &Tag{name: name, description: description} +} + +func (k *Tag) Name() string { return k.name } +func (k *Tag) Description() string { return k.description } + +func (k *Tag) Format(w io.Writer, buf []byte, l label.Label) {} + +// New creates a new Label with this key. +func (k *Tag) New() label.Label { return label.OfValue(k, nil) } + +// Int represents a key +type Int struct { + name string + description string +} + +// NewInt creates a new Key for int values. +func NewInt(name, description string) *Int { + return &Int{name: name, description: description} +} + +func (k *Int) Name() string { return k.name } +func (k *Int) Description() string { return k.description } + +func (k *Int) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int) Of(v int) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int) Get(lm label.Map) int { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int) From(t label.Label) int { return int(t.Unpack64()) } + +// Int8 represents a key +type Int8 struct { + name string + description string +} + +// NewInt8 creates a new Key for int8 values. +func NewInt8(name, description string) *Int8 { + return &Int8{name: name, description: description} +} + +func (k *Int8) Name() string { return k.name } +func (k *Int8) Description() string { return k.description } + +func (k *Int8) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int8) Of(v int8) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int8) Get(lm label.Map) int8 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int8) From(t label.Label) int8 { return int8(t.Unpack64()) } + +// Int16 represents a key +type Int16 struct { + name string + description string +} + +// NewInt16 creates a new Key for int16 values. +func NewInt16(name, description string) *Int16 { + return &Int16{name: name, description: description} +} + +func (k *Int16) Name() string { return k.name } +func (k *Int16) Description() string { return k.description } + +func (k *Int16) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int16) Of(v int16) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int16) Get(lm label.Map) int16 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. 
+func (k *Int16) From(t label.Label) int16 { return int16(t.Unpack64()) } + +// Int32 represents a key +type Int32 struct { + name string + description string +} + +// NewInt32 creates a new Key for int32 values. +func NewInt32(name, description string) *Int32 { + return &Int32{name: name, description: description} +} + +func (k *Int32) Name() string { return k.name } +func (k *Int32) Description() string { return k.description } + +func (k *Int32) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int32) Of(v int32) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int32) Get(lm label.Map) int32 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int32) From(t label.Label) int32 { return int32(t.Unpack64()) } + +// Int64 represents a key +type Int64 struct { + name string + description string +} + +// NewInt64 creates a new Key for int64 values. +func NewInt64(name, description string) *Int64 { + return &Int64{name: name, description: description} +} + +func (k *Int64) Name() string { return k.name } +func (k *Int64) Description() string { return k.description } + +func (k *Int64) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendInt(buf, k.From(l), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *Int64) Of(v int64) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *Int64) Get(lm label.Map) int64 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *Int64) From(t label.Label) int64 { return int64(t.Unpack64()) } + +// UInt represents a key +type UInt struct { + name string + description string +} + +// NewUInt creates a new Key for uint values. +func NewUInt(name, description string) *UInt { + return &UInt{name: name, description: description} +} + +func (k *UInt) Name() string { return k.name } +func (k *UInt) Description() string { return k.description } + +func (k *UInt) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt) Of(v uint) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt) Get(lm label.Map) uint { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt) From(t label.Label) uint { return uint(t.Unpack64()) } + +// UInt8 represents a key +type UInt8 struct { + name string + description string +} + +// NewUInt8 creates a new Key for uint8 values. +func NewUInt8(name, description string) *UInt8 { + return &UInt8{name: name, description: description} +} + +func (k *UInt8) Name() string { return k.name } +func (k *UInt8) Description() string { return k.description } + +func (k *UInt8) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. 
+func (k *UInt8) Of(v uint8) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt8) Get(lm label.Map) uint8 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt8) From(t label.Label) uint8 { return uint8(t.Unpack64()) } + +// UInt16 represents a key +type UInt16 struct { + name string + description string +} + +// NewUInt16 creates a new Key for uint16 values. +func NewUInt16(name, description string) *UInt16 { + return &UInt16{name: name, description: description} +} + +func (k *UInt16) Name() string { return k.name } +func (k *UInt16) Description() string { return k.description } + +func (k *UInt16) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt16) Of(v uint16) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt16) Get(lm label.Map) uint16 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt16) From(t label.Label) uint16 { return uint16(t.Unpack64()) } + +// UInt32 represents a key +type UInt32 struct { + name string + description string +} + +// NewUInt32 creates a new Key for uint32 values. +func NewUInt32(name, description string) *UInt32 { + return &UInt32{name: name, description: description} +} + +func (k *UInt32) Name() string { return k.name } +func (k *UInt32) Description() string { return k.description } + +func (k *UInt32) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt32) Of(v uint32) label.Label { return label.Of64(k, uint64(v)) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt32) Get(lm label.Map) uint32 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt32) From(t label.Label) uint32 { return uint32(t.Unpack64()) } + +// UInt64 represents a key +type UInt64 struct { + name string + description string +} + +// NewUInt64 creates a new Key for uint64 values. +func NewUInt64(name, description string) *UInt64 { + return &UInt64{name: name, description: description} +} + +func (k *UInt64) Name() string { return k.name } +func (k *UInt64) Description() string { return k.description } + +func (k *UInt64) Format(w io.Writer, buf []byte, l label.Label) { + w.Write(strconv.AppendUint(buf, k.From(l), 10)) +} + +// Of creates a new Label with this key and the supplied value. +func (k *UInt64) Of(v uint64) label.Label { return label.Of64(k, v) } + +// Get can be used to get a label for the key from a label.Map. +func (k *UInt64) Get(lm label.Map) uint64 { + if t := lm.Find(k); t.Valid() { + return k.From(t) + } + return 0 +} + +// From can be used to get a value from a Label. +func (k *UInt64) From(t label.Label) uint64 { return t.Unpack64() } + +// Float32 represents a key +type Float32 struct { + name string + description string +} + +// NewFloat32 creates a new Key for float32 values. 
+func NewFloat32(name, description string) *Float32 {
+	return &Float32{name: name, description: description}
+}
+
+func (k *Float32) Name() string        { return k.name }
+func (k *Float32) Description() string { return k.description }
+
+func (k *Float32) Format(w io.Writer, buf []byte, l label.Label) {
+	w.Write(strconv.AppendFloat(buf, float64(k.From(l)), 'E', -1, 32))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Float32) Of(v float32) label.Label {
+	return label.Of64(k, uint64(math.Float32bits(v)))
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Float32) Get(lm label.Map) float32 {
+	if t := lm.Find(k); t.Valid() {
+		return k.From(t)
+	}
+	return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Float32) From(t label.Label) float32 {
+	return math.Float32frombits(uint32(t.Unpack64()))
+}
+
+// Float64 represents a key
+type Float64 struct {
+	name        string
+	description string
+}
+
+// NewFloat64 creates a new Key for float64 values.
+func NewFloat64(name, description string) *Float64 {
+	return &Float64{name: name, description: description}
+}
+
+func (k *Float64) Name() string        { return k.name }
+func (k *Float64) Description() string { return k.description }
+
+func (k *Float64) Format(w io.Writer, buf []byte, l label.Label) {
+	w.Write(strconv.AppendFloat(buf, k.From(l), 'E', -1, 64))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Float64) Of(v float64) label.Label {
+	return label.Of64(k, math.Float64bits(v))
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Float64) Get(lm label.Map) float64 {
+	if t := lm.Find(k); t.Valid() {
+		return k.From(t)
+	}
+	return 0
+}
+
+// From can be used to get a value from a Label.
+func (k *Float64) From(t label.Label) float64 {
+	return math.Float64frombits(t.Unpack64())
+}
+
+// String represents a key
+type String struct {
+	name        string
+	description string
+}
+
+// NewString creates a new Key for string values.
+func NewString(name, description string) *String {
+	return &String{name: name, description: description}
+}
+
+func (k *String) Name() string        { return k.name }
+func (k *String) Description() string { return k.description }
+
+func (k *String) Format(w io.Writer, buf []byte, l label.Label) {
+	w.Write(strconv.AppendQuote(buf, k.From(l)))
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *String) Of(v string) label.Label { return label.OfString(k, v) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *String) Get(lm label.Map) string {
+	if t := lm.Find(k); t.Valid() {
+		return k.From(t)
+	}
+	return ""
+}
+
+// From can be used to get a value from a Label.
+func (k *String) From(t label.Label) string { return t.UnpackString() }
+
+// Boolean represents a key
+type Boolean struct {
+	name        string
+	description string
+}
+
+// NewBoolean creates a new Key for bool values.
+func NewBoolean(name, description string) *Boolean {
+	return &Boolean{name: name, description: description}
+}
+
+func (k *Boolean) Name() string        { return k.name }
+func (k *Boolean) Description() string { return k.description }
+
+func (k *Boolean) Format(w io.Writer, buf []byte, l label.Label) {
+	w.Write(strconv.AppendBool(buf, k.From(l)))
+}
+
+// Of creates a new Label with this key and the supplied value. 
+func (k *Boolean) Of(v bool) label.Label {
+	if v {
+		return label.Of64(k, 1)
+	}
+	return label.Of64(k, 0)
+}
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Boolean) Get(lm label.Map) bool {
+	if t := lm.Find(k); t.Valid() {
+		return k.From(t)
+	}
+	return false
+}
+
+// From can be used to get a value from a Label.
+func (k *Boolean) From(t label.Label) bool { return t.Unpack64() > 0 }
+
+// Error represents a key
+type Error struct {
+	name        string
+	description string
+}
+
+// NewError creates a new Key for error values.
+func NewError(name, description string) *Error {
+	return &Error{name: name, description: description}
+}
+
+func (k *Error) Name() string        { return k.name }
+func (k *Error) Description() string { return k.description }
+
+func (k *Error) Format(w io.Writer, buf []byte, l label.Label) {
+	io.WriteString(w, k.From(l).Error())
+}
+
+// Of creates a new Label with this key and the supplied value.
+func (k *Error) Of(v error) label.Label { return label.OfValue(k, v) }
+
+// Get can be used to get a label for the key from a label.Map.
+func (k *Error) Get(lm label.Map) error {
+	if t := lm.Find(k); t.Valid() {
+		return k.From(t)
+	}
+	return nil
+}
+
+// From can be used to get a value from a Label.
+func (k *Error) From(t label.Label) error {
+	err, _ := t.UnpackValue().(error)
+	return err
+}
diff --git a/vendor/golang.org/x/tools/internal/event/keys/standard.go b/vendor/golang.org/x/tools/internal/event/keys/standard.go
new file mode 100644
index 0000000000000..7e95866592134
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/keys/standard.go
@@ -0,0 +1,22 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package keys
+
+var (
+	// Msg is a key used to add message strings to label lists.
+	Msg = NewString("message", "a readable message")
+	// Label is a key used to indicate an event adds labels to the context.
+	Label = NewTag("label", "a label context marker")
+	// Start is used for things like traces that have a name.
+	Start = NewString("start", "span start")
+	// End is a key used to mark the end of a span.
+	End = NewTag("end", "a span end marker")
+	// Detach is a key used to mark an event that detaches from the current span.
+	Detach = NewTag("detach", "a span detach marker")
+	// Err is a key used to add error values to label lists.
+	Err = NewError("error", "an error that occurred")
+	// Metric is a key used to indicate an event records metrics.
+	Metric = NewTag("metric", "a metric event marker")
+)
diff --git a/vendor/golang.org/x/tools/internal/event/label/label.go b/vendor/golang.org/x/tools/internal/event/label/label.go
new file mode 100644
index 0000000000000..0f526e1f9ab44
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/event/label/label.go
@@ -0,0 +1,215 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package label
+
+import (
+	"fmt"
+	"io"
+	"reflect"
+	"unsafe"
+)
+
+// Key is used as the identity of a Label.
+// Keys are intended to be compared by pointer only, the name should be unique
+// for communicating with external systems, but it is not required or enforced.
+type Key interface {
+	// Name returns the key name.
+	Name() string
+	// Description returns a string that can be used to describe the value.
+ Description() string + + // Format is used in formatting to append the value of the label to the + // supplied buffer. + // The formatter may use the supplied buf as a scratch area to avoid + // allocations. + Format(w io.Writer, buf []byte, l Label) +} + +// Label holds a key and value pair. +// It is normally used when passing around lists of labels. +type Label struct { + key Key + packed uint64 + untyped interface{} +} + +// Map is the interface to a collection of Labels indexed by key. +type Map interface { + // Find returns the label that matches the supplied key. + Find(key Key) Label +} + +// List is the interface to something that provides an iterable +// list of labels. +// Iteration should start from 0 and continue until Valid returns false. +type List interface { + // Valid returns true if the index is within range for the list. + // It does not imply the label at that index will itself be valid. + Valid(index int) bool + // Label returns the label at the given index. + Label(index int) Label +} + +// list implements LabelList for a list of Labels. +type list struct { + labels []Label +} + +// filter wraps a LabelList filtering out specific labels. +type filter struct { + keys []Key + underlying List +} + +// listMap implements LabelMap for a simple list of labels. +type listMap struct { + labels []Label +} + +// mapChain implements LabelMap for a list of underlying LabelMap. +type mapChain struct { + maps []Map +} + +// OfValue creates a new label from the key and value. +// This method is for implementing new key types, label creation should +// normally be done with the Of method of the key. +func OfValue(k Key, value interface{}) Label { return Label{key: k, untyped: value} } + +// UnpackValue assumes the label was built using LabelOfValue and returns the value +// that was passed to that constructor. +// This method is for implementing new key types, for type safety normal +// access should be done with the From method of the key. +func (t Label) UnpackValue() interface{} { return t.untyped } + +// Of64 creates a new label from a key and a uint64. This is often +// used for non uint64 values that can be packed into a uint64. +// This method is for implementing new key types, label creation should +// normally be done with the Of method of the key. +func Of64(k Key, v uint64) Label { return Label{key: k, packed: v} } + +// Unpack64 assumes the label was built using LabelOf64 and returns the value that +// was passed to that constructor. +// This method is for implementing new key types, for type safety normal +// access should be done with the From method of the key. +func (t Label) Unpack64() uint64 { return t.packed } + +type stringptr unsafe.Pointer + +// OfString creates a new label from a key and a string. +// This method is for implementing new key types, label creation should +// normally be done with the Of method of the key. +func OfString(k Key, v string) Label { + hdr := (*reflect.StringHeader)(unsafe.Pointer(&v)) + return Label{ + key: k, + packed: uint64(hdr.Len), + untyped: stringptr(hdr.Data), + } +} + +// UnpackString assumes the label was built using LabelOfString and returns the +// value that was passed to that constructor. +// This method is for implementing new key types, for type safety normal +// access should be done with the From method of the key. 
+func (t Label) UnpackString() string { + var v string + hdr := (*reflect.StringHeader)(unsafe.Pointer(&v)) + hdr.Data = uintptr(t.untyped.(stringptr)) + hdr.Len = int(t.packed) + return v +} + +// Valid returns true if the Label is a valid one (it has a key). +func (t Label) Valid() bool { return t.key != nil } + +// Key returns the key of this Label. +func (t Label) Key() Key { return t.key } + +// Format is used for debug printing of labels. +func (t Label) Format(f fmt.State, r rune) { + if !t.Valid() { + io.WriteString(f, `nil`) + return + } + io.WriteString(f, t.Key().Name()) + io.WriteString(f, "=") + var buf [128]byte + t.Key().Format(f, buf[:0], t) +} + +func (l *list) Valid(index int) bool { + return index >= 0 && index < len(l.labels) +} + +func (l *list) Label(index int) Label { + return l.labels[index] +} + +func (f *filter) Valid(index int) bool { + return f.underlying.Valid(index) +} + +func (f *filter) Label(index int) Label { + l := f.underlying.Label(index) + for _, f := range f.keys { + if l.Key() == f { + return Label{} + } + } + return l +} + +func (lm listMap) Find(key Key) Label { + for _, l := range lm.labels { + if l.Key() == key { + return l + } + } + return Label{} +} + +func (c mapChain) Find(key Key) Label { + for _, src := range c.maps { + l := src.Find(key) + if l.Valid() { + return l + } + } + return Label{} +} + +var emptyList = &list{} + +func NewList(labels ...Label) List { + if len(labels) == 0 { + return emptyList + } + return &list{labels: labels} +} + +func Filter(l List, keys ...Key) List { + if len(keys) == 0 { + return l + } + return &filter{keys: keys, underlying: l} +} + +func NewMap(labels ...Label) Map { + return listMap{labels: labels} +} + +func MergeMaps(srcs ...Map) Map { + var nonNil []Map + for _, src := range srcs { + if src != nil { + nonNil = append(nonNil, src) + } + } + if len(nonNil) == 1 { + return nonNil[0] + } + return mapChain{maps: nonNil} +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bexport.go b/vendor/golang.org/x/tools/internal/gcimporter/bexport.go new file mode 100644 index 0000000000000..30582ed6d3d70 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/bexport.go @@ -0,0 +1,852 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Binary package export. +// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go; +// see that file for specification of the format. + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "math" + "math/big" + "sort" + "strings" +) + +// If debugFormat is set, each integer and string value is preceded by a marker +// and position information in the encoding. This mechanism permits an importer +// to recognize immediately when it is out of sync. The importer recognizes this +// mode automatically (i.e., it can import export data produced with debugging +// support even if debugFormat is not set at the time of import). This mode will +// lead to massively larger export data (by a factor of 2 to 3) and should only +// be enabled during development and debugging. +// +// NOTE: This flag is the first flag to enable if importing dies because of +// (suspected) format errors, and whenever a change is made to the format. +const debugFormat = false // default: false + +// Current export format version. Increase with each format change. 
+// +// Note: The latest binary (non-indexed) export format is at version 6. +// This exporter is still at level 4, but it doesn't matter since +// the binary importer can handle older versions just fine. +// +// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE +// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE +// 4: type name objects support type aliases, uses aliasTag +// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used) +// 2: removed unused bool in ODCL export (compiler only) +// 1: header format change (more regular), export package for _ struct fields +// 0: Go1.7 encoding +const exportVersion = 4 + +// trackAllTypes enables cycle tracking for all types, not just named +// types. The existing compiler invariants assume that unnamed types +// that are not completely set up are not used, or else there are spurious +// errors. +// If disabled, only named types are tracked, possibly leading to slightly +// less efficient encoding in rare cases. It also prevents the export of +// some corner-case type declarations (but those are not handled correctly +// with with the textual export format either). +// TODO(gri) enable and remove once issues caused by it are fixed +const trackAllTypes = false + +type exporter struct { + fset *token.FileSet + out bytes.Buffer + + // object -> index maps, indexed in order of serialization + strIndex map[string]int + pkgIndex map[*types.Package]int + typIndex map[types.Type]int + + // position encoding + posInfoFormat bool + prevFile string + prevLine int + + // debugging support + written int // bytes written + indent int // for trace +} + +// internalError represents an error generated inside this package. +type internalError string + +func (e internalError) Error() string { return "gcimporter: " + string(e) } + +func internalErrorf(format string, args ...interface{}) error { + return internalError(fmt.Sprintf(format, args...)) +} + +// BExportData returns binary export data for pkg. +// If no file set is provided, position info will be missing. +func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) { + if !debug { + defer func() { + if e := recover(); e != nil { + if ierr, ok := e.(internalError); ok { + err = ierr + return + } + // Not an internal error; panic again. + panic(e) + } + }() + } + + p := exporter{ + fset: fset, + strIndex: map[string]int{"": 0}, // empty string is mapped to 0 + pkgIndex: make(map[*types.Package]int), + typIndex: make(map[types.Type]int), + posInfoFormat: true, // TODO(gri) might become a flag, eventually + } + + // write version info + // The version string must start with "version %d" where %d is the version + // number. Additional debugging information may follow after a blank; that + // text is ignored by the importer. 
+ p.rawStringln(fmt.Sprintf("version %d", exportVersion)) + var debug string + if debugFormat { + debug = "debug" + } + p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly + p.bool(trackAllTypes) + p.bool(p.posInfoFormat) + + // --- generic export data --- + + // populate type map with predeclared "known" types + for index, typ := range predeclared() { + p.typIndex[typ] = index + } + if len(p.typIndex) != len(predeclared()) { + return nil, internalError("duplicate entries in type map?") + } + + // write package data + p.pkg(pkg, true) + if trace { + p.tracef("\n") + } + + // write objects + objcount := 0 + scope := pkg.Scope() + for _, name := range scope.Names() { + if !token.IsExported(name) { + continue + } + if trace { + p.tracef("\n") + } + p.obj(scope.Lookup(name)) + objcount++ + } + + // indicate end of list + if trace { + p.tracef("\n") + } + p.tag(endTag) + + // for self-verification only (redundant) + p.int(objcount) + + if trace { + p.tracef("\n") + } + + // --- end of export data --- + + return p.out.Bytes(), nil +} + +func (p *exporter) pkg(pkg *types.Package, emptypath bool) { + if pkg == nil { + panic(internalError("unexpected nil pkg")) + } + + // if we saw the package before, write its index (>= 0) + if i, ok := p.pkgIndex[pkg]; ok { + p.index('P', i) + return + } + + // otherwise, remember the package, write the package tag (< 0) and package data + if trace { + p.tracef("P%d = { ", len(p.pkgIndex)) + defer p.tracef("} ") + } + p.pkgIndex[pkg] = len(p.pkgIndex) + + p.tag(packageTag) + p.string(pkg.Name()) + if emptypath { + p.string("") + } else { + p.string(pkg.Path()) + } +} + +func (p *exporter) obj(obj types.Object) { + switch obj := obj.(type) { + case *types.Const: + p.tag(constTag) + p.pos(obj) + p.qualifiedName(obj) + p.typ(obj.Type()) + p.value(obj.Val()) + + case *types.TypeName: + if obj.IsAlias() { + p.tag(aliasTag) + p.pos(obj) + p.qualifiedName(obj) + } else { + p.tag(typeTag) + } + p.typ(obj.Type()) + + case *types.Var: + p.tag(varTag) + p.pos(obj) + p.qualifiedName(obj) + p.typ(obj.Type()) + + case *types.Func: + p.tag(funcTag) + p.pos(obj) + p.qualifiedName(obj) + sig := obj.Type().(*types.Signature) + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) + + default: + panic(internalErrorf("unexpected object %v (%T)", obj, obj)) + } +} + +func (p *exporter) pos(obj types.Object) { + if !p.posInfoFormat { + return + } + + file, line := p.fileLine(obj) + if file == p.prevFile { + // common case: write line delta + // delta == 0 means different file or no line change + delta := line - p.prevLine + p.int(delta) + if delta == 0 { + p.int(-1) // -1 means no file change + } + } else { + // different file + p.int(0) + // Encode filename as length of common prefix with previous + // filename, followed by (possibly empty) suffix. Filenames + // frequently share path prefixes, so this can save a lot + // of space and make export data size less dependent on file + // path length. The suffix is unlikely to be empty because + // file names tend to end in ".go". 
+ n := commonPrefixLen(p.prevFile, file) + p.int(n) // n >= 0 + p.string(file[n:]) // write suffix only + p.prevFile = file + p.int(line) + } + p.prevLine = line +} + +func (p *exporter) fileLine(obj types.Object) (file string, line int) { + if p.fset != nil { + pos := p.fset.Position(obj.Pos()) + file = pos.Filename + line = pos.Line + } + return +} + +func commonPrefixLen(a, b string) int { + if len(a) > len(b) { + a, b = b, a + } + // len(a) <= len(b) + i := 0 + for i < len(a) && a[i] == b[i] { + i++ + } + return i +} + +func (p *exporter) qualifiedName(obj types.Object) { + p.string(obj.Name()) + p.pkg(obj.Pkg(), false) +} + +func (p *exporter) typ(t types.Type) { + if t == nil { + panic(internalError("nil type")) + } + + // Possible optimization: Anonymous pointer types *T where + // T is a named type are common. We could canonicalize all + // such types *T to a single type PT = *T. This would lead + // to at most one *T entry in typIndex, and all future *T's + // would be encoded as the respective index directly. Would + // save 1 byte (pointerTag) per *T and reduce the typIndex + // size (at the cost of a canonicalization map). We can do + // this later, without encoding format change. + + // if we saw the type before, write its index (>= 0) + if i, ok := p.typIndex[t]; ok { + p.index('T', i) + return + } + + // otherwise, remember the type, write the type tag (< 0) and type data + if trackAllTypes { + if trace { + p.tracef("T%d = {>\n", len(p.typIndex)) + defer p.tracef("<\n} ") + } + p.typIndex[t] = len(p.typIndex) + } + + switch t := t.(type) { + case *types.Named: + if !trackAllTypes { + // if we don't track all types, track named types now + p.typIndex[t] = len(p.typIndex) + } + + p.tag(namedTag) + p.pos(t.Obj()) + p.qualifiedName(t.Obj()) + p.typ(t.Underlying()) + if !types.IsInterface(t) { + p.assocMethods(t) + } + + case *types.Array: + p.tag(arrayTag) + p.int64(t.Len()) + p.typ(t.Elem()) + + case *types.Slice: + p.tag(sliceTag) + p.typ(t.Elem()) + + case *dddSlice: + p.tag(dddTag) + p.typ(t.elem) + + case *types.Struct: + p.tag(structTag) + p.fieldList(t) + + case *types.Pointer: + p.tag(pointerTag) + p.typ(t.Elem()) + + case *types.Signature: + p.tag(signatureTag) + p.paramList(t.Params(), t.Variadic()) + p.paramList(t.Results(), false) + + case *types.Interface: + p.tag(interfaceTag) + p.iface(t) + + case *types.Map: + p.tag(mapTag) + p.typ(t.Key()) + p.typ(t.Elem()) + + case *types.Chan: + p.tag(chanTag) + p.int(int(3 - t.Dir())) // hack + p.typ(t.Elem()) + + default: + panic(internalErrorf("unexpected type %T: %s", t, t)) + } +} + +func (p *exporter) assocMethods(named *types.Named) { + // Sort methods (for determinism). 
+ var methods []*types.Func + for i := 0; i < named.NumMethods(); i++ { + methods = append(methods, named.Method(i)) + } + sort.Sort(methodsByName(methods)) + + p.int(len(methods)) + + if trace && methods != nil { + p.tracef("associated methods {>\n") + } + + for i, m := range methods { + if trace && i > 0 { + p.tracef("\n") + } + + p.pos(m) + name := m.Name() + p.string(name) + if !exported(name) { + p.pkg(m.Pkg(), false) + } + + sig := m.Type().(*types.Signature) + p.paramList(types.NewTuple(sig.Recv()), false) + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) + p.int(0) // dummy value for go:nointerface pragma - ignored by importer + } + + if trace && methods != nil { + p.tracef("<\n} ") + } +} + +type methodsByName []*types.Func + +func (x methodsByName) Len() int { return len(x) } +func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() } + +func (p *exporter) fieldList(t *types.Struct) { + if trace && t.NumFields() > 0 { + p.tracef("fields {>\n") + defer p.tracef("<\n} ") + } + + p.int(t.NumFields()) + for i := 0; i < t.NumFields(); i++ { + if trace && i > 0 { + p.tracef("\n") + } + p.field(t.Field(i)) + p.string(t.Tag(i)) + } +} + +func (p *exporter) field(f *types.Var) { + if !f.IsField() { + panic(internalError("field expected")) + } + + p.pos(f) + p.fieldName(f) + p.typ(f.Type()) +} + +func (p *exporter) iface(t *types.Interface) { + // TODO(gri): enable importer to load embedded interfaces, + // then emit Embeddeds and ExplicitMethods separately here. + p.int(0) + + n := t.NumMethods() + if trace && n > 0 { + p.tracef("methods {>\n") + defer p.tracef("<\n} ") + } + p.int(n) + for i := 0; i < n; i++ { + if trace && i > 0 { + p.tracef("\n") + } + p.method(t.Method(i)) + } +} + +func (p *exporter) method(m *types.Func) { + sig := m.Type().(*types.Signature) + if sig.Recv() == nil { + panic(internalError("method expected")) + } + + p.pos(m) + p.string(m.Name()) + if m.Name() != "_" && !token.IsExported(m.Name()) { + p.pkg(m.Pkg(), false) + } + + // interface method; no need to encode receiver. + p.paramList(sig.Params(), sig.Variadic()) + p.paramList(sig.Results(), false) +} + +func (p *exporter) fieldName(f *types.Var) { + name := f.Name() + + if f.Anonymous() { + // anonymous field - we distinguish between 3 cases: + // 1) field name matches base type name and is exported + // 2) field name matches base type name and is not exported + // 3) field name doesn't match base type name (alias name) + bname := basetypeName(f.Type()) + if name == bname { + if token.IsExported(name) { + name = "" // 1) we don't need to know the field name or package + } else { + name = "?" // 2) use unexported name "?" 
to force package export + } + } else { + // 3) indicate alias and export name as is + // (this requires an extra "@" but this is a rare case) + p.string("@") + } + } + + p.string(name) + if name != "" && !token.IsExported(name) { + p.pkg(f.Pkg(), false) + } +} + +func basetypeName(typ types.Type) string { + switch typ := deref(typ).(type) { + case *types.Basic: + return typ.Name() + case *types.Named: + return typ.Obj().Name() + default: + return "" // unnamed type + } +} + +func (p *exporter) paramList(params *types.Tuple, variadic bool) { + // use negative length to indicate unnamed parameters + // (look at the first parameter only since either all + // names are present or all are absent) + n := params.Len() + if n > 0 && params.At(0).Name() == "" { + n = -n + } + p.int(n) + for i := 0; i < params.Len(); i++ { + q := params.At(i) + t := q.Type() + if variadic && i == params.Len()-1 { + t = &dddSlice{t.(*types.Slice).Elem()} + } + p.typ(t) + if n > 0 { + name := q.Name() + p.string(name) + if name != "_" { + p.pkg(q.Pkg(), false) + } + } + p.string("") // no compiler-specific info + } +} + +func (p *exporter) value(x constant.Value) { + if trace { + p.tracef("= ") + } + + switch x.Kind() { + case constant.Bool: + tag := falseTag + if constant.BoolVal(x) { + tag = trueTag + } + p.tag(tag) + + case constant.Int: + if v, exact := constant.Int64Val(x); exact { + // common case: x fits into an int64 - use compact encoding + p.tag(int64Tag) + p.int64(v) + return + } + // uncommon case: large x - use float encoding + // (powers of 2 will be encoded efficiently with exponent) + p.tag(floatTag) + p.float(constant.ToFloat(x)) + + case constant.Float: + p.tag(floatTag) + p.float(x) + + case constant.Complex: + p.tag(complexTag) + p.float(constant.Real(x)) + p.float(constant.Imag(x)) + + case constant.String: + p.tag(stringTag) + p.string(constant.StringVal(x)) + + case constant.Unknown: + // package contains type errors + p.tag(unknownTag) + + default: + panic(internalErrorf("unexpected value %v (%T)", x, x)) + } +} + +func (p *exporter) float(x constant.Value) { + if x.Kind() != constant.Float { + panic(internalErrorf("unexpected constant %v, want float", x)) + } + // extract sign (there is no -0) + sign := constant.Sign(x) + if sign == 0 { + // x == 0 + p.int(0) + return + } + // x != 0 + + var f big.Float + if v, exact := constant.Float64Val(x); exact { + // float64 + f.SetFloat64(v) + } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { + // TODO(gri): add big.Rat accessor to constant.Value. + r := valueToRat(num) + f.SetRat(r.Quo(r, valueToRat(denom))) + } else { + // Value too large to represent as a fraction => inaccessible. + // TODO(gri): add big.Float accessor to constant.Value. + f.SetFloat64(math.MaxFloat64) // FIXME + } + + // extract exponent such that 0.5 <= m < 1.0 + var m big.Float + exp := f.MantExp(&m) + + // extract mantissa as *big.Int + // - set exponent large enough so mant satisfies mant.IsInt() + // - get *big.Int from mant + m.SetMantExp(&m, int(m.MinPrec())) + mant, acc := m.Int(nil) + if acc != big.Exact { + panic(internalError("internal error")) + } + + p.int(sign) + p.int(exp) + p.string(string(mant.Bytes())) +} + +func valueToRat(x constant.Value) *big.Rat { + // Convert little-endian to big-endian. + // I can't believe this is necessary. 
+ bytes := constant.Bytes(x) + for i := 0; i < len(bytes)/2; i++ { + bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i] + } + return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes)) +} + +func (p *exporter) bool(b bool) bool { + if trace { + p.tracef("[") + defer p.tracef("= %v] ", b) + } + + x := 0 + if b { + x = 1 + } + p.int(x) + return b +} + +// ---------------------------------------------------------------------------- +// Low-level encoders + +func (p *exporter) index(marker byte, index int) { + if index < 0 { + panic(internalError("invalid index < 0")) + } + if debugFormat { + p.marker('t') + } + if trace { + p.tracef("%c%d ", marker, index) + } + p.rawInt64(int64(index)) +} + +func (p *exporter) tag(tag int) { + if tag >= 0 { + panic(internalError("invalid tag >= 0")) + } + if debugFormat { + p.marker('t') + } + if trace { + p.tracef("%s ", tagString[-tag]) + } + p.rawInt64(int64(tag)) +} + +func (p *exporter) int(x int) { + p.int64(int64(x)) +} + +func (p *exporter) int64(x int64) { + if debugFormat { + p.marker('i') + } + if trace { + p.tracef("%d ", x) + } + p.rawInt64(x) +} + +func (p *exporter) string(s string) { + if debugFormat { + p.marker('s') + } + if trace { + p.tracef("%q ", s) + } + // if we saw the string before, write its index (>= 0) + // (the empty string is mapped to 0) + if i, ok := p.strIndex[s]; ok { + p.rawInt64(int64(i)) + return + } + // otherwise, remember string and write its negative length and bytes + p.strIndex[s] = len(p.strIndex) + p.rawInt64(-int64(len(s))) + for i := 0; i < len(s); i++ { + p.rawByte(s[i]) + } +} + +// marker emits a marker byte and position information which makes +// it easy for a reader to detect if it is "out of sync". Used for +// debugFormat format only. +func (p *exporter) marker(m byte) { + p.rawByte(m) + // Enable this for help tracking down the location + // of an incorrect marker when running in debugFormat. + if false && trace { + p.tracef("#%d ", p.written) + } + p.rawInt64(int64(p.written)) +} + +// rawInt64 should only be used by low-level encoders. +func (p *exporter) rawInt64(x int64) { + var tmp [binary.MaxVarintLen64]byte + n := binary.PutVarint(tmp[:], x) + for i := 0; i < n; i++ { + p.rawByte(tmp[i]) + } +} + +// rawStringln should only be used to emit the initial version string. +func (p *exporter) rawStringln(s string) { + for i := 0; i < len(s); i++ { + p.rawByte(s[i]) + } + p.rawByte('\n') +} + +// rawByte is the bottleneck interface to write to p.out. +// rawByte escapes b as follows (any encoding does that +// hides '$'): +// +// '$' => '|' 'S' +// '|' => '|' '|' +// +// Necessary so other tools can find the end of the +// export data by searching for "$$". +// rawByte should only be used by low-level encoders. +func (p *exporter) rawByte(b byte) { + switch b { + case '$': + // write '$' as '|' 'S' + b = 'S' + fallthrough + case '|': + // write '|' as '|' '|' + p.out.WriteByte('|') + p.written++ + } + p.out.WriteByte(b) + p.written++ +} + +// tracef is like fmt.Printf but it rewrites the format string +// to take care of indentation. +func (p *exporter) tracef(format string, args ...interface{}) { + if strings.ContainsAny(format, "<>\n") { + var buf bytes.Buffer + for i := 0; i < len(format); i++ { + // no need to deal with runes + ch := format[i] + switch ch { + case '>': + p.indent++ + continue + case '<': + p.indent-- + continue + } + buf.WriteByte(ch) + if ch == '\n' { + for j := p.indent; j > 0; j-- { + buf.WriteString(". 
") + } + } + } + format = buf.String() + } + fmt.Printf(format, args...) +} + +// Debugging support. +// (tagString is only used when tracing is enabled) +var tagString = [...]string{ + // Packages + -packageTag: "package", + + // Types + -namedTag: "named type", + -arrayTag: "array", + -sliceTag: "slice", + -dddTag: "ddd", + -structTag: "struct", + -pointerTag: "pointer", + -signatureTag: "signature", + -interfaceTag: "interface", + -mapTag: "map", + -chanTag: "chan", + + // Values + -falseTag: "false", + -trueTag: "true", + -int64Tag: "int64", + -floatTag: "float", + -fractionTag: "fraction", + -complexTag: "complex", + -stringTag: "string", + -unknownTag: "unknown", + + // Type aliases + -aliasTag: "alias", +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go new file mode 100644 index 0000000000000..b85de01470018 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -0,0 +1,1053 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/bimport.go. + +package gcimporter + +import ( + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +type importer struct { + imports map[string]*types.Package + data []byte + importpath string + buf []byte // for reading strings + version int // export format version + + // object lists + strList []string // in order of appearance + pathList []string // in order of appearance + pkgList []*types.Package // in order of appearance + typList []types.Type // in order of appearance + interfaceList []*types.Interface // for delayed completion only + trackAllTypes bool + + // position encoding + posInfoFormat bool + prevFile string + prevLine int + fake fakeFileSet + + // debugging support + debugFormat bool + read int // bytes read +} + +// BImportData imports a package from the serialized package data +// and returns the number of bytes consumed and a reference to the package. +// If the export data version is not recognized or the format is otherwise +// compromised, an error is returned. +func BImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + // catch panics and return them as errors + const currentVersion = 6 + version := -1 // unknown version + defer func() { + if e := recover(); e != nil { + // Return a (possibly nil or incomplete) package unchanged (see #16088). + if version > currentVersion { + err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) + } else { + err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) + } + } + }() + + p := importer{ + imports: imports, + data: data, + importpath: path, + version: version, + strList: []string{""}, // empty string is mapped to 0 + pathList: []string{""}, // empty string is mapped to 0 + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*fileInfo), + }, + } + defer p.fake.setLines() // set lines for files in fset + + // read version info + var versionstr string + if b := p.rawByte(); b == 'c' || b == 'd' { + // Go1.7 encoding; first byte encodes low-level + // encoding format (compact vs debug). 
+ // For backward-compatibility only (avoid problems with + // old installed packages). Newly compiled packages use + // the extensible format string. + // TODO(gri) Remove this support eventually; after Go1.8. + if b == 'd' { + p.debugFormat = true + } + p.trackAllTypes = p.rawByte() == 'a' + p.posInfoFormat = p.int() != 0 + versionstr = p.string() + if versionstr == "v1" { + version = 0 + } + } else { + // Go1.8 extensible encoding + // read version string and extract version number (ignore anything after the version number) + versionstr = p.rawStringln(b) + if s := strings.SplitN(versionstr, " ", 3); len(s) >= 2 && s[0] == "version" { + if v, err := strconv.Atoi(s[1]); err == nil && v > 0 { + version = v + } + } + } + p.version = version + + // read version specific flags - extend as necessary + switch p.version { + // case currentVersion: + // ... + // fallthrough + case currentVersion, 5, 4, 3, 2, 1: + p.debugFormat = p.rawStringln(p.rawByte()) == "debug" + p.trackAllTypes = p.int() != 0 + p.posInfoFormat = p.int() != 0 + case 0: + // Go1.7 encoding format - nothing to do here + default: + errorf("unknown bexport format version %d (%q)", p.version, versionstr) + } + + // --- generic export data --- + + // populate typList with predeclared "known" types + p.typList = append(p.typList, predeclared()...) + + // read package data + pkg = p.pkg() + + // read objects of phase 1 only (see cmd/compile/internal/gc/bexport.go) + objcount := 0 + for { + tag := p.tagOrIndex() + if tag == endTag { + break + } + p.obj(tag) + objcount++ + } + + // self-verification + if count := p.int(); count != objcount { + errorf("got %d objects; want %d", objcount, count) + } + + // ignore compiler-specific import data + + // complete interfaces + // TODO(gri) re-investigate if we still need to do this in a delayed fashion + for _, typ := range p.interfaceList { + typ.Complete() + } + + // record all referenced packages as imports + list := append(([]*types.Package)(nil), p.pkgList[1:]...) 
+ sort.Sort(byPath(list)) + pkg.SetImports(list) + + // package was imported completely and without errors + pkg.MarkComplete() + + return p.read, pkg, nil +} + +func errorf(format string, args ...interface{}) { + panic(fmt.Sprintf(format, args...)) +} + +func (p *importer) pkg() *types.Package { + // if the package was seen before, i is its index (>= 0) + i := p.tagOrIndex() + if i >= 0 { + return p.pkgList[i] + } + + // otherwise, i is the package tag (< 0) + if i != packageTag { + errorf("unexpected package tag %d version %d", i, p.version) + } + + // read package data + name := p.string() + var path string + if p.version >= 5 { + path = p.path() + } else { + path = p.string() + } + if p.version >= 6 { + p.int() // package height; unused by go/types + } + + // we should never see an empty package name + if name == "" { + errorf("empty package name in import") + } + + // an empty path denotes the package we are currently importing; + // it must be the first package we see + if (path == "") != (len(p.pkgList) == 0) { + errorf("package path %q for pkg index %d", path, len(p.pkgList)) + } + + // if the package was imported before, use that one; otherwise create a new one + if path == "" { + path = p.importpath + } + pkg := p.imports[path] + if pkg == nil { + pkg = types.NewPackage(path, name) + p.imports[path] = pkg + } else if pkg.Name() != name { + errorf("conflicting names %s and %s for package %q", pkg.Name(), name, path) + } + p.pkgList = append(p.pkgList, pkg) + + return pkg +} + +// objTag returns the tag value for each object kind. +func objTag(obj types.Object) int { + switch obj.(type) { + case *types.Const: + return constTag + case *types.TypeName: + return typeTag + case *types.Var: + return varTag + case *types.Func: + return funcTag + default: + errorf("unexpected object: %v (%T)", obj, obj) // panics + panic("unreachable") + } +} + +func sameObj(a, b types.Object) bool { + // Because unnamed types are not canonicalized, we cannot simply compare types for + // (pointer) identity. + // Ideally we'd check equality of constant values as well, but this is good enough. + return objTag(a) == objTag(b) && types.Identical(a.Type(), b.Type()) +} + +func (p *importer) declare(obj types.Object) { + pkg := obj.Pkg() + if alt := pkg.Scope().Insert(obj); alt != nil { + // This can only trigger if we import a (non-type) object a second time. + // Excluding type aliases, this cannot happen because 1) we only import a package + // once; and b) we ignore compiler-specific export data which may contain + // functions whose inlined function bodies refer to other functions that + // were already imported. + // However, type aliases require reexporting the original type, so we need + // to allow it (see also the comment in cmd/compile/internal/gc/bimport.go, + // method importer.obj, switch case importing functions). + // TODO(gri) review/update this comment once the gc compiler handles type aliases. 
+ if !sameObj(obj, alt) { + errorf("inconsistent import:\n\t%v\npreviously imported as:\n\t%v\n", obj, alt) + } + } +} + +func (p *importer) obj(tag int) { + switch tag { + case constTag: + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + val := p.value() + p.declare(types.NewConst(pos, pkg, name, typ, val)) + + case aliasTag: + // TODO(gri) verify type alias hookup is correct + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + p.declare(types.NewTypeName(pos, pkg, name, typ)) + + case typeTag: + p.typ(nil, nil) + + case varTag: + pos := p.pos() + pkg, name := p.qualifiedName() + typ := p.typ(nil, nil) + p.declare(types.NewVar(pos, pkg, name, typ)) + + case funcTag: + pos := p.pos() + pkg, name := p.qualifiedName() + params, isddd := p.paramList() + result, _ := p.paramList() + sig := types.NewSignature(nil, params, result, isddd) + p.declare(types.NewFunc(pos, pkg, name, sig)) + + default: + errorf("unexpected object tag %d", tag) + } +} + +const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go + +func (p *importer) pos() token.Pos { + if !p.posInfoFormat { + return token.NoPos + } + + file := p.prevFile + line := p.prevLine + delta := p.int() + line += delta + if p.version >= 5 { + if delta == deltaNewFile { + if n := p.int(); n >= 0 { + // file changed + file = p.path() + line = n + } + } + } else { + if delta == 0 { + if n := p.int(); n >= 0 { + // file changed + file = p.prevFile[:n] + p.string() + line = p.int() + } + } + } + p.prevFile = file + p.prevLine = line + + return p.fake.pos(file, line, 0) +} + +// Synthesize a token.Pos +type fakeFileSet struct { + fset *token.FileSet + files map[string]*fileInfo +} + +type fileInfo struct { + file *token.File + lastline int +} + +const maxlines = 64 * 1024 + +func (s *fakeFileSet) pos(file string, line, column int) token.Pos { + // TODO(mdempsky): Make use of column. + + // Since we don't know the set of needed file positions, we reserve maxlines + // positions per file. We delay calling token.File.SetLines until all + // positions have been calculated (by way of fakeFileSet.setLines), so that + // we can avoid setting unnecessary lines. See also golang/go#46586. + f := s.files[file] + if f == nil { + f = &fileInfo{file: s.fset.AddFile(file, -1, maxlines)} + s.files[file] = f + } + if line > maxlines { + line = 1 + } + if line > f.lastline { + f.lastline = line + } + + // Return a fake position assuming that f.file consists only of newlines. + return token.Pos(f.file.Base() + line - 1) +} + +func (s *fakeFileSet) setLines() { + fakeLinesOnce.Do(func() { + fakeLines = make([]int, maxlines) + for i := range fakeLines { + fakeLines[i] = i + } + }) + for _, f := range s.files { + f.file.SetLines(fakeLines[:f.lastline]) + } +} + +var ( + fakeLines []int + fakeLinesOnce sync.Once +) + +func (p *importer) qualifiedName() (pkg *types.Package, name string) { + name = p.string() + pkg = p.pkg() + return +} + +func (p *importer) record(t types.Type) { + p.typList = append(p.typList, t) +} + +// A dddSlice is a types.Type representing ...T parameters. +// It only appears for parameter types and does not escape +// the importer. +type dddSlice struct { + elem types.Type +} + +func (t *dddSlice) Underlying() types.Type { return t } +func (t *dddSlice) String() string { return "..." + t.elem.String() } + +// parent is the package which declared the type; parent == nil means +// the package currently imported. 
The parent package is needed for +// exported struct fields and interface methods which don't contain +// explicit package information in the export data. +// +// A non-nil tname is used as the "owner" of the result type; i.e., +// the result type is the underlying type of tname. tname is used +// to give interface methods a named receiver type where possible. +func (p *importer) typ(parent *types.Package, tname *types.Named) types.Type { + // if the type was seen before, i is its index (>= 0) + i := p.tagOrIndex() + if i >= 0 { + return p.typList[i] + } + + // otherwise, i is the type tag (< 0) + switch i { + case namedTag: + // read type object + pos := p.pos() + parent, name := p.qualifiedName() + scope := parent.Scope() + obj := scope.Lookup(name) + + // if the object doesn't exist yet, create and insert it + if obj == nil { + obj = types.NewTypeName(pos, parent, name, nil) + scope.Insert(obj) + } + + if _, ok := obj.(*types.TypeName); !ok { + errorf("pkg = %s, name = %s => %s", parent, name, obj) + } + + // associate new named type with obj if it doesn't exist yet + t0 := types.NewNamed(obj.(*types.TypeName), nil, nil) + + // but record the existing type, if any + tname := obj.Type().(*types.Named) // tname is either t0 or the existing type + p.record(tname) + + // read underlying type + t0.SetUnderlying(p.typ(parent, t0)) + + // interfaces don't have associated methods + if types.IsInterface(t0) { + return tname + } + + // read associated methods + for i := p.int(); i > 0; i-- { + // TODO(gri) replace this with something closer to fieldName + pos := p.pos() + name := p.string() + if !exported(name) { + p.pkg() + } + + recv, _ := p.paramList() // TODO(gri) do we need a full param list for the receiver? + params, isddd := p.paramList() + result, _ := p.paramList() + p.int() // go:nointerface pragma - discarded + + sig := types.NewSignature(recv.At(0), params, result, isddd) + t0.AddMethod(types.NewFunc(pos, parent, name, sig)) + } + + return tname + + case arrayTag: + t := new(types.Array) + if p.trackAllTypes { + p.record(t) + } + + n := p.int64() + *t = *types.NewArray(p.typ(parent, nil), n) + return t + + case sliceTag: + t := new(types.Slice) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewSlice(p.typ(parent, nil)) + return t + + case dddTag: + t := new(dddSlice) + if p.trackAllTypes { + p.record(t) + } + + t.elem = p.typ(parent, nil) + return t + + case structTag: + t := new(types.Struct) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewStruct(p.fieldList(parent)) + return t + + case pointerTag: + t := new(types.Pointer) + if p.trackAllTypes { + p.record(t) + } + + *t = *types.NewPointer(p.typ(parent, nil)) + return t + + case signatureTag: + t := new(types.Signature) + if p.trackAllTypes { + p.record(t) + } + + params, isddd := p.paramList() + result, _ := p.paramList() + *t = *types.NewSignature(nil, params, result, isddd) + return t + + case interfaceTag: + // Create a dummy entry in the type list. This is safe because we + // cannot expect the interface type to appear in a cycle, as any + // such cycle must contain a named type which would have been + // first defined earlier. + // TODO(gri) Is this still true now that we have type aliases? + // See issue #23225. 
+ n := len(p.typList) + if p.trackAllTypes { + p.record(nil) + } + + var embeddeds []types.Type + for n := p.int(); n > 0; n-- { + p.pos() + embeddeds = append(embeddeds, p.typ(parent, nil)) + } + + t := newInterface(p.methodList(parent, tname), embeddeds) + p.interfaceList = append(p.interfaceList, t) + if p.trackAllTypes { + p.typList[n] = t + } + return t + + case mapTag: + t := new(types.Map) + if p.trackAllTypes { + p.record(t) + } + + key := p.typ(parent, nil) + val := p.typ(parent, nil) + *t = *types.NewMap(key, val) + return t + + case chanTag: + t := new(types.Chan) + if p.trackAllTypes { + p.record(t) + } + + dir := chanDir(p.int()) + val := p.typ(parent, nil) + *t = *types.NewChan(dir, val) + return t + + default: + errorf("unexpected type tag %d", i) // panics + panic("unreachable") + } +} + +func chanDir(d int) types.ChanDir { + // tag values must match the constants in cmd/compile/internal/gc/go.go + switch d { + case 1 /* Crecv */ : + return types.RecvOnly + case 2 /* Csend */ : + return types.SendOnly + case 3 /* Cboth */ : + return types.SendRecv + default: + errorf("unexpected channel dir %d", d) + return 0 + } +} + +func (p *importer) fieldList(parent *types.Package) (fields []*types.Var, tags []string) { + if n := p.int(); n > 0 { + fields = make([]*types.Var, n) + tags = make([]string, n) + for i := range fields { + fields[i], tags[i] = p.field(parent) + } + } + return +} + +func (p *importer) field(parent *types.Package) (*types.Var, string) { + pos := p.pos() + pkg, name, alias := p.fieldName(parent) + typ := p.typ(parent, nil) + tag := p.string() + + anonymous := false + if name == "" { + // anonymous field - typ must be T or *T and T must be a type name + switch typ := deref(typ).(type) { + case *types.Basic: // basic types are named types + pkg = nil // // objects defined in Universe scope have no package + name = typ.Name() + case *types.Named: + name = typ.Obj().Name() + default: + errorf("named base type expected") + } + anonymous = true + } else if alias { + // anonymous field: we have an explicit name because it's an alias + anonymous = true + } + + return types.NewField(pos, pkg, name, typ, anonymous), tag +} + +func (p *importer) methodList(parent *types.Package, baseType *types.Named) (methods []*types.Func) { + if n := p.int(); n > 0 { + methods = make([]*types.Func, n) + for i := range methods { + methods[i] = p.method(parent, baseType) + } + } + return +} + +func (p *importer) method(parent *types.Package, baseType *types.Named) *types.Func { + pos := p.pos() + pkg, name, _ := p.fieldName(parent) + // If we don't have a baseType, use a nil receiver. + // A receiver using the actual interface type (which + // we don't know yet) will be filled in when we call + // types.Interface.Complete. 
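// [Editorial aside, not part of the vendored file] A hedged sketch of the
// receiver back-filling mentioned above, with a hypothetical one-method
// interface: go/types synthesizes the missing receiver when the interface is
// constructed and completed, which is why a nil receiver is acceptable here.
foo := types.NewFunc(token.NoPos, nil, "Foo", types.NewSignature(nil, nil, nil, false))
iface := types.NewInterfaceType([]*types.Func{foo}, nil)
iface.Complete()
fmt.Println(foo.Type().(*types.Signature).Recv() != nil) // true: receiver was filled in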
+ var recv *types.Var + if baseType != nil { + recv = types.NewVar(token.NoPos, parent, "", baseType) + } + params, isddd := p.paramList() + result, _ := p.paramList() + sig := types.NewSignature(recv, params, result, isddd) + return types.NewFunc(pos, pkg, name, sig) +} + +func (p *importer) fieldName(parent *types.Package) (pkg *types.Package, name string, alias bool) { + name = p.string() + pkg = parent + if pkg == nil { + // use the imported package instead + pkg = p.pkgList[0] + } + if p.version == 0 && name == "_" { + // version 0 didn't export a package for _ fields + return + } + switch name { + case "": + // 1) field name matches base type name and is exported: nothing to do + case "?": + // 2) field name matches base type name and is not exported: need package + name = "" + pkg = p.pkg() + case "@": + // 3) field name doesn't match type name (alias) + name = p.string() + alias = true + fallthrough + default: + if !exported(name) { + pkg = p.pkg() + } + } + return +} + +func (p *importer) paramList() (*types.Tuple, bool) { + n := p.int() + if n == 0 { + return nil, false + } + // negative length indicates unnamed parameters + named := true + if n < 0 { + n = -n + named = false + } + // n > 0 + params := make([]*types.Var, n) + isddd := false + for i := range params { + params[i], isddd = p.param(named) + } + return types.NewTuple(params...), isddd +} + +func (p *importer) param(named bool) (*types.Var, bool) { + t := p.typ(nil, nil) + td, isddd := t.(*dddSlice) + if isddd { + t = types.NewSlice(td.elem) + } + + var pkg *types.Package + var name string + if named { + name = p.string() + if name == "" { + errorf("expected named parameter") + } + if name != "_" { + pkg = p.pkg() + } + if i := strings.Index(name, "·"); i > 0 { + name = name[:i] // cut off gc-specific parameter numbering + } + } + + // read and discard compiler-specific info + p.string() + + return types.NewVar(token.NoPos, pkg, name, t), isddd +} + +func exported(name string) bool { + ch, _ := utf8.DecodeRuneInString(name) + return unicode.IsUpper(ch) +} + +func (p *importer) value() constant.Value { + switch tag := p.tagOrIndex(); tag { + case falseTag: + return constant.MakeBool(false) + case trueTag: + return constant.MakeBool(true) + case int64Tag: + return constant.MakeInt64(p.int64()) + case floatTag: + return p.float() + case complexTag: + re := p.float() + im := p.float() + return constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + case stringTag: + return constant.MakeString(p.string()) + case unknownTag: + return constant.MakeUnknown() + default: + errorf("unexpected value tag %d", tag) // panics + panic("unreachable") + } +} + +func (p *importer) float() constant.Value { + sign := p.int() + if sign == 0 { + return constant.MakeInt64(0) + } + + exp := p.int() + mant := []byte(p.string()) // big endian + + // remove leading 0's if any + for len(mant) > 0 && mant[0] == 0 { + mant = mant[1:] + } + + // convert to little endian + // TODO(gri) go/constant should have a more direct conversion function + // (e.g., once it supports a big.Float based implementation) + for i, j := 0, len(mant)-1; i < j; i, j = i+1, j-1 { + mant[i], mant[j] = mant[j], mant[i] + } + + // adjust exponent (constant.MakeFromBytes creates an integer value, + // but mant represents the mantissa bits such that 0.5 <= mant < 1.0) + exp -= len(mant) << 3 + if len(mant) > 0 { + for msd := mant[len(mant)-1]; msd&0x80 == 0; msd <<= 1 { + exp++ + } + } + + x := constant.MakeFromBytes(mant) + switch { + case exp < 0: + d := 
constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp)) + x = constant.BinaryOp(x, token.QUO, d) + case exp > 0: + x = constant.Shift(x, token.SHL, uint(exp)) + } + + if sign < 0 { + x = constant.UnaryOp(token.SUB, x, 0) + } + return x +} + +// ---------------------------------------------------------------------------- +// Low-level decoders + +func (p *importer) tagOrIndex() int { + if p.debugFormat { + p.marker('t') + } + + return int(p.rawInt64()) +} + +func (p *importer) int() int { + x := p.int64() + if int64(int(x)) != x { + errorf("exported integer too large") + } + return int(x) +} + +func (p *importer) int64() int64 { + if p.debugFormat { + p.marker('i') + } + + return p.rawInt64() +} + +func (p *importer) path() string { + if p.debugFormat { + p.marker('p') + } + // if the path was seen before, i is its index (>= 0) + // (the empty string is at index 0) + i := p.rawInt64() + if i >= 0 { + return p.pathList[i] + } + // otherwise, i is the negative path length (< 0) + a := make([]string, -i) + for n := range a { + a[n] = p.string() + } + s := strings.Join(a, "/") + p.pathList = append(p.pathList, s) + return s +} + +func (p *importer) string() string { + if p.debugFormat { + p.marker('s') + } + // if the string was seen before, i is its index (>= 0) + // (the empty string is at index 0) + i := p.rawInt64() + if i >= 0 { + return p.strList[i] + } + // otherwise, i is the negative string length (< 0) + if n := int(-i); n <= cap(p.buf) { + p.buf = p.buf[:n] + } else { + p.buf = make([]byte, n) + } + for i := range p.buf { + p.buf[i] = p.rawByte() + } + s := string(p.buf) + p.strList = append(p.strList, s) + return s +} + +func (p *importer) marker(want byte) { + if got := p.rawByte(); got != want { + errorf("incorrect marker: got %c; want %c (pos = %d)", got, want, p.read) + } + + pos := p.read + if n := int(p.rawInt64()); n != pos { + errorf("incorrect position: got %d; want %d", n, pos) + } +} + +// rawInt64 should only be used by low-level decoders. +func (p *importer) rawInt64() int64 { + i, err := binary.ReadVarint(p) + if err != nil { + errorf("read error: %v", err) + } + return i +} + +// rawStringln should only be used to read the initial version string. +func (p *importer) rawStringln(b byte) string { + p.buf = p.buf[:0] + for b != '\n' { + p.buf = append(p.buf, b) + b = p.rawByte() + } + return string(p.buf) +} + +// needed for binary.ReadVarint in rawInt64 +func (p *importer) ReadByte() (byte, error) { + return p.rawByte(), nil +} + +// byte is the bottleneck interface for reading p.data. +// It unescapes '|' 'S' to '$' and '|' '|' to '|'. +// rawByte should only be used by low-level decoders. +func (p *importer) rawByte() byte { + b := p.data[0] + r := 1 + if b == '|' { + b = p.data[1] + r = 2 + switch b { + case 'S': + b = '$' + case '|': + // nothing to do + default: + errorf("unexpected escape sequence in export data") + } + } + p.data = p.data[r:] + p.read += r + return b + +} + +// ---------------------------------------------------------------------------- +// Export format + +// Tags. Must be < 0. 
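// [Editorial aside, not part of the vendored file] A worked example of the
// encoding that p.float above reverses, using my own value 6.25 (not taken
// from real export data): 6.25 = 0.78125 * 2**3, and 0.78125 * 256 = 200 =
// 0xC8, so the exporter stores sign=+1, exp=3, mant="\xC8" (big endian).
// The decoder rebuilds the mantissa as the integer 200, subtracts 8 bits
// per mantissa byte from the exponent (3 - 8 = -5), and divides:
x := constant.MakeFromBytes([]byte{0xC8}) // 200; a single byte, so endianness is moot
d := constant.Shift(constant.MakeInt64(1), token.SHL, 5)
fmt.Println(constant.BinaryOp(x, token.QUO, d)) // 6.25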
+const ( + // Objects + packageTag = -(iota + 1) + constTag + typeTag + varTag + funcTag + endTag + + // Types + namedTag + arrayTag + sliceTag + dddTag + structTag + pointerTag + signatureTag + interfaceTag + mapTag + chanTag + + // Values + falseTag + trueTag + int64Tag + floatTag + fractionTag // not used by gc + complexTag + stringTag + nilTag // only used by gc (appears in exported inlined function bodies) + unknownTag // not used by gc (only appears in packages with errors) + + // Type aliases + aliasTag +) + +var predeclOnce sync.Once +var predecl []types.Type // initialized lazily + +func predeclared() []types.Type { + predeclOnce.Do(func() { + // initialize lazily to be sure that all + // elements have been initialized before + predecl = []types.Type{ // basic types + types.Typ[types.Bool], + types.Typ[types.Int], + types.Typ[types.Int8], + types.Typ[types.Int16], + types.Typ[types.Int32], + types.Typ[types.Int64], + types.Typ[types.Uint], + types.Typ[types.Uint8], + types.Typ[types.Uint16], + types.Typ[types.Uint32], + types.Typ[types.Uint64], + types.Typ[types.Uintptr], + types.Typ[types.Float32], + types.Typ[types.Float64], + types.Typ[types.Complex64], + types.Typ[types.Complex128], + types.Typ[types.String], + + // basic type aliases + types.Universe.Lookup("byte").Type(), + types.Universe.Lookup("rune").Type(), + + // error + types.Universe.Lookup("error").Type(), + + // untyped types + types.Typ[types.UntypedBool], + types.Typ[types.UntypedInt], + types.Typ[types.UntypedRune], + types.Typ[types.UntypedFloat], + types.Typ[types.UntypedComplex], + types.Typ[types.UntypedString], + types.Typ[types.UntypedNil], + + // package unsafe + types.Typ[types.UnsafePointer], + + // invalid type + types.Typ[types.Invalid], // only appears in packages with errors + + // used internally by gc; never used by this package or in .a files + anyType{}, + } + predecl = append(predecl, additionalPredeclared()...) + }) + return predecl +} + +type anyType struct{} + +func (t anyType) Underlying() types.Type { return t } +func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go new file mode 100644 index 0000000000000..f6437feb1cfde --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go @@ -0,0 +1,99 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go. + +// This file implements FindExportData. + +package gcimporter + +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" +) + +func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) { + // See $GOROOT/include/ar.h. + hdr := make([]byte, 16+12+6+6+8+10+2) + _, err = io.ReadFull(r, hdr) + if err != nil { + return + } + // leave for debugging + if false { + fmt.Printf("header: %s", hdr) + } + s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10])) + length, err := strconv.Atoi(s) + size = int64(length) + if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' { + err = fmt.Errorf("invalid archive header") + return + } + name = strings.TrimSpace(string(hdr[:16])) + return +} + +// FindExportData positions the reader r at the beginning of the +// export data section of an underlying GC-created object/archive +// file by reading from it. 
The reader must be positioned at the +// start of the file before calling this function. The hdr result +// is the string before the export data, either "$$" or "$$B". +// The size result is the length of the export data in bytes, or -1 if not known. +func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) { + // Read first line to make sure this is an object file. + line, err := r.ReadSlice('\n') + if err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + + if string(line) == "!\n" { + // Archive file. Scan to __.PKGDEF. + var name string + if name, size, err = readGopackHeader(r); err != nil { + return + } + + // First entry should be __.PKGDEF. + if name != "__.PKGDEF" { + err = fmt.Errorf("go archive is missing __.PKGDEF") + return + } + + // Read first line of __.PKGDEF data, so that line + // is once again the first line of the input. + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + size -= int64(len(line)) + } + + // Now at __.PKGDEF in archive or still at beginning of file. + // Either way, line should begin with "go object ". + if !strings.HasPrefix(string(line), "go object ") { + err = fmt.Errorf("not a Go object file") + return + } + + // Skip over object header to export data. + // Begins after first line starting with $$. + for line[0] != '$' { + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + size -= int64(len(line)) + } + hdr = string(line) + if size < 0 { + size = -1 + } + + return +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go new file mode 100644 index 0000000000000..0372fb3a64693 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -0,0 +1,265 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file is a reduced copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go. + +// Package gcimporter provides various functions for reading +// gc-generated object files that can be used to implement the +// Importer interface defined by the Go 1.5 standard library package. +package gcimporter // import "golang.org/x/tools/internal/gcimporter" + +import ( + "bufio" + "bytes" + "fmt" + "go/build" + "go/token" + "go/types" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" +) + +const ( + // Enable debug during development: it adds some additional checks, and + // prevents errors from being recovered. + debug = false + + // If trace is set, debugging output is printed to std out. + trace = false +) + +var exportMap sync.Map // package dir → func() (string, bool) + +// lookupGorootExport returns the location of the export data +// (normally found in the build cache, but located in GOROOT/pkg +// in prior Go releases) for the package located in pkgDir. +// +// (We use the package's directory instead of its import path +// mainly to simplify handling of the packages in src/vendor +// and cmd/vendor.) 
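// [Editorial aside, not part of the vendored file] Roughly the command the
// function below shells out to; querying the import path "fmt" directly is
// my own simplification (the real code passes a package directory under
// GOROOT):
cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", "fmt")
cmd.Dir = build.Default.GOROOT
if out, err := cmd.Output(); err == nil {
    fmt.Printf("export data: %s\n", bytes.TrimSpace(out))
}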
+func lookupGorootExport(pkgDir string) (string, bool) { + f, ok := exportMap.Load(pkgDir) + if !ok { + var ( + listOnce sync.Once + exportPath string + ) + f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) { + listOnce.Do(func() { + cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir) + cmd.Dir = build.Default.GOROOT + var output []byte + output, err := cmd.Output() + if err != nil { + return + } + + exports := strings.Split(string(bytes.TrimSpace(output)), "\n") + if len(exports) != 1 { + return + } + + exportPath = exports[0] + }) + + return exportPath, exportPath != "" + }) + } + + return f.(func() (string, bool))() +} + +var pkgExts = [...]string{".a", ".o"} + +// FindPkg returns the filename and unique package id for an import +// path based on package information provided by build.Import (using +// the build.Default build.Context). A relative srcDir is interpreted +// relative to the current working directory. +// If no file was found, an empty filename is returned. +func FindPkg(path, srcDir string) (filename, id string) { + if path == "" { + return + } + + var noext string + switch { + default: + // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" + // Don't require the source files to be present. + if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 + srcDir = abs + } + bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) + if bp.PkgObj == "" { + var ok bool + if bp.Goroot && bp.Dir != "" { + filename, ok = lookupGorootExport(bp.Dir) + } + if !ok { + id = path // make sure we have an id to print in error message + return + } + } else { + noext = strings.TrimSuffix(bp.PkgObj, ".a") + id = bp.ImportPath + } + + case build.IsLocalImport(path): + // "./x" -> "/this/directory/x.ext", "/this/directory/x" + noext = filepath.Join(srcDir, path) + id = noext + + case filepath.IsAbs(path): + // for completeness only - go/build.Import + // does not support absolute imports + // "/x" -> "/x.ext", "/x" + noext = path + id = path + } + + if false { // for debugging + if path != id { + fmt.Printf("%s -> %s\n", path, id) + } + } + + if filename != "" { + if f, err := os.Stat(filename); err == nil && !f.IsDir() { + return + } + } + + // try extensions + for _, ext := range pkgExts { + filename = noext + ext + if f, err := os.Stat(filename); err == nil && !f.IsDir() { + return + } + } + + filename = "" // not found + return +} + +// Import imports a gc-generated package given its import path and srcDir, adds +// the corresponding package object to the packages map, and returns the object. +// The packages map must contain all packages already imported. +func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { + var rc io.ReadCloser + var filename, id string + if lookup != nil { + // With custom lookup specified, assume that caller has + // converted path to a canonical import path for use in the map. + if path == "unsafe" { + return types.Unsafe, nil + } + id = path + + // No need to re-import if the package was imported completely before. 
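// [Editorial aside, not part of the vendored file] A hedged sketch of how
// Import is typically driven, say from a test in this package: the caller
// owns the packages map, and a package that was already imported completely
// is returned from that map instead of being re-read (the check just below
// does the same for the custom-lookup path).
packages := make(map[string]*types.Package)
if p1, err := Import(packages, "fmt", ".", nil); err == nil {
    p2, _ := Import(packages, "fmt", ".", nil)
    fmt.Println(p1 == p2, p1.Complete()) // true true (when export data is found)
}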
+ if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + f, err := lookup(path) + if err != nil { + return nil, err + } + rc = f + } else { + filename, id = FindPkg(path, srcDir) + if filename == "" { + if path == "unsafe" { + return types.Unsafe, nil + } + return nil, fmt.Errorf("can't find import: %q", id) + } + + // no need to re-import if the package was imported completely before + if pkg = packages[id]; pkg != nil && pkg.Complete() { + return + } + + // open file + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + // add file name to error + err = fmt.Errorf("%s: %v", filename, err) + } + }() + rc = f + } + defer rc.Close() + + var hdr string + var size int64 + buf := bufio.NewReader(rc) + if hdr, size, err = FindExportData(buf); err != nil { + return + } + + switch hdr { + case "$$B\n": + var data []byte + data, err = ioutil.ReadAll(buf) + if err != nil { + break + } + + // TODO(gri): allow clients of go/importer to provide a FileSet. + // Or, define a new standard go/types/gcexportdata package. + fset := token.NewFileSet() + + // The indexed export format starts with an 'i'; the older + // binary export format starts with a 'c', 'd', or 'v' + // (from "version"). Select appropriate importer. + if len(data) > 0 { + switch data[0] { + case 'i': + _, pkg, err := IImportData(fset, packages, data[1:], id) + return pkg, err + + case 'v', 'c', 'd': + _, pkg, err := BImportData(fset, packages, data, id) + return pkg, err + + case 'u': + _, pkg, err := UImportData(fset, packages, data[1:size], id) + return pkg, err + + default: + l := len(data) + if l > 10 { + l = 10 + } + return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id) + } + } + + default: + err = fmt.Errorf("unknown export data header: %q", hdr) + } + + return +} + +func deref(typ types.Type) types.Type { + if p, _ := typ.(*types.Pointer); p != nil { + return p.Elem() + } + return typ +} + +type byPath []*types.Package + +func (a byPath) Len() int { return len(a) } +func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go new file mode 100644 index 0000000000000..7d90f00f323a0 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -0,0 +1,1056 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed binary package export. +// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; +// see that file for specification of the format. + +package gcimporter + +import ( + "bytes" + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "io" + "math/big" + "reflect" + "sort" + "strconv" + "strings" + + "golang.org/x/tools/internal/typeparams" +) + +// IExportShallow encodes "shallow" export data for the specified package. +// +// No promises are made about the encoding other than that it can be +// decoded by the same version of IIExportShallow. If you plan to save +// export data in the file system, be sure to include a cryptographic +// digest of the executable in the key to avoid version skew. 
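// [Editorial aside, not part of the vendored file] Before the shallow
// variants below, a minimal round trip through IExportData (defined later in
// this file) and IImportData (in iimport.go), using a package built by hand;
// the path "example.org/demo" and the constant are made up:
pkg := types.NewPackage("example.org/demo", "demo")
pkg.Scope().Insert(types.NewConst(token.NoPos, pkg, "Answer",
    types.Typ[types.UntypedInt], constant.MakeInt64(42)))
pkg.MarkComplete()

var buf bytes.Buffer
if err := IExportData(&buf, token.NewFileSet(), pkg); err == nil {
    imports := make(map[string]*types.Package)
    if _, got, err := IImportData(token.NewFileSet(), imports, buf.Bytes(), "example.org/demo"); err == nil {
        fmt.Println(got.Scope().Lookup("Answer") != nil) // true
    }
}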
+func IExportShallow(fset *token.FileSet, pkg *types.Package) ([]byte, error) { + // In principle this operation can only fail if out.Write fails, + // but that's impossible for bytes.Buffer---and as a matter of + // fact iexportCommon doesn't even check for I/O errors. + // TODO(adonovan): handle I/O errors properly. + // TODO(adonovan): use byte slices throughout, avoiding copying. + const bundle, shallow = false, true + var out bytes.Buffer + err := iexportCommon(&out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}) + return out.Bytes(), err +} + +// IImportShallow decodes "shallow" types.Package data encoded by IExportShallow +// in the same executable. This function cannot import data from +// cmd/compile or gcexportdata.Write. +func IImportShallow(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string, insert InsertType) (*types.Package, error) { + const bundle = false + pkgs, err := iimportCommon(fset, imports, data, bundle, path, insert) + if err != nil { + return nil, err + } + return pkgs[0], nil +} + +// InsertType is the type of a function that creates a types.TypeName +// object for a named type and inserts it into the scope of the +// specified Package. +type InsertType = func(pkg *types.Package, name string) + +// Current bundled export format version. Increase with each format change. +// 0: initial implementation +const bundleVersion = 0 + +// IExportData writes indexed export data for pkg to out. +// +// If no file set is provided, position info will be missing. +// The package path of the top-level package will not be recorded, +// so that calls to IImportData can override with a provided package path. +func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error { + const bundle, shallow = false, false + return iexportCommon(out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}) +} + +// IExportBundle writes an indexed export bundle for pkgs to out. +func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error { + const bundle, shallow = true, false + return iexportCommon(out, fset, bundle, shallow, iexportVersion, pkgs) +} + +func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, version int, pkgs []*types.Package) (err error) { + if !debug { + defer func() { + if e := recover(); e != nil { + if ierr, ok := e.(internalError); ok { + err = ierr + return + } + // Not an internal error; panic again. + panic(e) + } + }() + } + + p := iexporter{ + fset: fset, + version: version, + shallow: shallow, + allPkgs: map[*types.Package]bool{}, + stringIndex: map[string]uint64{}, + declIndex: map[types.Object]uint64{}, + tparamNames: map[types.Object]string{}, + typIndex: map[types.Type]uint64{}, + } + if !bundle { + p.localpkg = pkgs[0] + } + + for i, pt := range predeclared() { + p.typIndex[pt] = uint64(i) + } + if len(p.typIndex) > predeclReserved { + panic(internalErrorf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved)) + } + + // Initialize work queue with exported declarations. + for _, pkg := range pkgs { + scope := pkg.Scope() + for _, name := range scope.Names() { + if token.IsExported(name) { + p.pushDecl(scope.Lookup(name)) + } + } + + if bundle { + // Ensure pkg and its imports are included in the index. + p.allPkgs[pkg] = true + for _, imp := range pkg.Imports() { + p.allPkgs[imp] = true + } + } + } + + // Loop until no more work. + for !p.declTodo.empty() { + p.doDecl(p.declTodo.popHead()) + } + + // Append indices to data0 section. 
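// [Editorial aside, not part of the vendored file] For a single package the
// header assembled further down is just three uvarints -- format version,
// string-section length, declaration-section length -- followed by the two
// sections themselves (bundles prepend one more uvarint). A self-contained
// sketch of peeling such a header apart; 120 and 4096 are made-up lengths:
var hdr bytes.Buffer
for _, v := range []uint64{2, 120, 4096} {
    var tmp [binary.MaxVarintLen64]byte
    hdr.Write(tmp[:binary.PutUvarint(tmp[:], v)])
}
r := bytes.NewReader(hdr.Bytes())
version, _ := binary.ReadUvarint(r)
sLen, _ := binary.ReadUvarint(r)
dLen, _ := binary.ReadUvarint(r)
fmt.Println(version, sLen, dLen) // 2 120 4096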
+ dataLen := uint64(p.data0.Len()) + w := p.newWriter() + w.writeIndex(p.declIndex) + + if bundle { + w.uint64(uint64(len(pkgs))) + for _, pkg := range pkgs { + w.pkg(pkg) + imps := pkg.Imports() + w.uint64(uint64(len(imps))) + for _, imp := range imps { + w.pkg(imp) + } + } + } + w.flush() + + // Assemble header. + var hdr intWriter + if bundle { + hdr.uint64(bundleVersion) + } + hdr.uint64(uint64(p.version)) + hdr.uint64(uint64(p.strings.Len())) + hdr.uint64(dataLen) + + // Flush output. + io.Copy(out, &hdr) + io.Copy(out, &p.strings) + io.Copy(out, &p.data0) + + return nil +} + +// writeIndex writes out an object index. mainIndex indicates whether +// we're writing out the main index, which is also read by +// non-compiler tools and includes a complete package description +// (i.e., name and height). +func (w *exportWriter) writeIndex(index map[types.Object]uint64) { + type pkgObj struct { + obj types.Object + name string // qualified name; differs from obj.Name for type params + } + // Build a map from packages to objects from that package. + pkgObjs := map[*types.Package][]pkgObj{} + + // For the main index, make sure to include every package that + // we reference, even if we're not exporting (or reexporting) + // any symbols from it. + if w.p.localpkg != nil { + pkgObjs[w.p.localpkg] = nil + } + for pkg := range w.p.allPkgs { + pkgObjs[pkg] = nil + } + + for obj := range index { + name := w.p.exportName(obj) + pkgObjs[obj.Pkg()] = append(pkgObjs[obj.Pkg()], pkgObj{obj, name}) + } + + var pkgs []*types.Package + for pkg, objs := range pkgObjs { + pkgs = append(pkgs, pkg) + + sort.Slice(objs, func(i, j int) bool { + return objs[i].name < objs[j].name + }) + } + + sort.Slice(pkgs, func(i, j int) bool { + return w.exportPath(pkgs[i]) < w.exportPath(pkgs[j]) + }) + + w.uint64(uint64(len(pkgs))) + for _, pkg := range pkgs { + w.string(w.exportPath(pkg)) + w.string(pkg.Name()) + w.uint64(uint64(0)) // package height is not needed for go/types + + objs := pkgObjs[pkg] + w.uint64(uint64(len(objs))) + for _, obj := range objs { + w.string(obj.name) + w.uint64(index[obj.obj]) + } + } +} + +// exportName returns the 'exported' name of an object. It differs from +// obj.Name() only for type parameters (see tparamExportName for details). +func (p *iexporter) exportName(obj types.Object) (res string) { + if name := p.tparamNames[obj]; name != "" { + return name + } + return obj.Name() +} + +type iexporter struct { + fset *token.FileSet + out *bytes.Buffer + version int + + shallow bool // don't put types from other packages in the index + localpkg *types.Package // (nil in bundle mode) + + // allPkgs tracks all packages that have been referenced by + // the export data, so we can ensure to include them in the + // main index. + allPkgs map[*types.Package]bool + + declTodo objQueue + + strings intWriter + stringIndex map[string]uint64 + + data0 intWriter + declIndex map[types.Object]uint64 + tparamNames map[types.Object]string // typeparam->exported name + typIndex map[types.Type]uint64 + + indent int // for tracing support +} + +func (p *iexporter) trace(format string, args ...interface{}) { + if !trace { + // Call sites should also be guarded, but having this check here allows + // easily enabling/disabling debug trace statements. + return + } + fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...) +} + +// stringOff returns the offset of s within the string section. +// If not already present, it's added to the end. 
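// [Editorial aside, not part of the vendored file] Each distinct string is
// written once as a uvarint length followed by its bytes, and everything
// else refers to it by offset. A tiny sketch mirroring that layout with two
// arbitrary strings of my own choosing:
var strs bytes.Buffer
add := func(s string) uint64 {
    off := uint64(strs.Len())
    var tmp [binary.MaxVarintLen64]byte
    strs.Write(tmp[:binary.PutUvarint(tmp[:], uint64(len(s)))])
    strs.WriteString(s)
    return off
}
fmt.Println(add("fmt"), add("Println")) // 0 4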
+func (p *iexporter) stringOff(s string) uint64 { + off, ok := p.stringIndex[s] + if !ok { + off = uint64(p.strings.Len()) + p.stringIndex[s] = off + + p.strings.uint64(uint64(len(s))) + p.strings.WriteString(s) + } + return off +} + +// pushDecl adds n to the declaration work queue, if not already present. +func (p *iexporter) pushDecl(obj types.Object) { + // Package unsafe is known to the compiler and predeclared. + // Caller should not ask us to do export it. + if obj.Pkg() == types.Unsafe { + panic("cannot export package unsafe") + } + + // Shallow export data: don't index decls from other packages. + if p.shallow && obj.Pkg() != p.localpkg { + return + } + + if _, ok := p.declIndex[obj]; ok { + return + } + + p.declIndex[obj] = ^uint64(0) // mark obj present in work queue + p.declTodo.pushTail(obj) +} + +// exportWriter handles writing out individual data section chunks. +type exportWriter struct { + p *iexporter + + data intWriter + currPkg *types.Package + prevFile string + prevLine int64 + prevColumn int64 +} + +func (w *exportWriter) exportPath(pkg *types.Package) string { + if pkg == w.p.localpkg { + return "" + } + return pkg.Path() +} + +func (p *iexporter) doDecl(obj types.Object) { + if trace { + p.trace("exporting decl %v (%T)", obj, obj) + p.indent++ + defer func() { + p.indent-- + p.trace("=> %s", obj) + }() + } + w := p.newWriter() + w.setPkg(obj.Pkg(), false) + + switch obj := obj.(type) { + case *types.Var: + w.tag('V') + w.pos(obj.Pos()) + w.typ(obj.Type(), obj.Pkg()) + + case *types.Func: + sig, _ := obj.Type().(*types.Signature) + if sig.Recv() != nil { + panic(internalErrorf("unexpected method: %v", sig)) + } + + // Function. + if typeparams.ForSignature(sig).Len() == 0 { + w.tag('F') + } else { + w.tag('G') + } + w.pos(obj.Pos()) + // The tparam list of the function type is the declaration of the type + // params. So, write out the type params right now. Then those type params + // will be referenced via their type offset (via typOff) in all other + // places in the signature and function where they are used. + // + // While importing the type parameters, tparamList computes and records + // their export name, so that it can be later used when writing the index. + if tparams := typeparams.ForSignature(sig); tparams.Len() > 0 { + w.tparamList(obj.Name(), tparams, obj.Pkg()) + } + w.signature(sig) + + case *types.Const: + w.tag('C') + w.pos(obj.Pos()) + w.value(obj.Type(), obj.Val()) + + case *types.TypeName: + t := obj.Type() + + if tparam, ok := t.(*typeparams.TypeParam); ok { + w.tag('P') + w.pos(obj.Pos()) + constraint := tparam.Constraint() + if p.version >= iexportVersionGo1_18 { + implicit := false + if iface, _ := constraint.(*types.Interface); iface != nil { + implicit = typeparams.IsImplicit(iface) + } + w.bool(implicit) + } + w.typ(constraint, obj.Pkg()) + break + } + + if obj.IsAlias() { + w.tag('A') + w.pos(obj.Pos()) + w.typ(t, obj.Pkg()) + break + } + + // Defined type. + named, ok := t.(*types.Named) + if !ok { + panic(internalErrorf("%s is not a defined type", t)) + } + + if typeparams.ForNamed(named).Len() == 0 { + w.tag('T') + } else { + w.tag('U') + } + w.pos(obj.Pos()) + + if typeparams.ForNamed(named).Len() > 0 { + // While importing the type parameters, tparamList computes and records + // their export name, so that it can be later used when writing the index. 
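// [Editorial aside, not part of the vendored file] For a hypothetical generic
// declaration such as type List[E any, _ any], the recorded export names
// would be "List.E" and "List.$1": the qualifier keeps them unique in the
// index, and blank names are disambiguated by their position. tparamName
// (defined later in this file) reverses the mapping:
fmt.Println(tparamName("List.E"), tparamName("List.$1")) // E _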
+ w.tparamList(obj.Name(), typeparams.ForNamed(named), obj.Pkg()) + } + + underlying := obj.Type().Underlying() + w.typ(underlying, obj.Pkg()) + + if types.IsInterface(t) { + break + } + + n := named.NumMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := named.Method(i) + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + + // Receiver type parameters are type arguments of the receiver type, so + // their name must be qualified before exporting recv. + if rparams := typeparams.RecvTypeParams(sig); rparams.Len() > 0 { + prefix := obj.Name() + "." + m.Name() + for i := 0; i < rparams.Len(); i++ { + rparam := rparams.At(i) + name := tparamExportName(prefix, rparam) + w.p.tparamNames[rparam.Obj()] = name + } + } + w.param(sig.Recv()) + w.signature(sig) + } + + default: + panic(internalErrorf("unexpected object: %v", obj)) + } + + p.declIndex[obj] = w.flush() +} + +func (w *exportWriter) tag(tag byte) { + w.data.WriteByte(tag) +} + +func (w *exportWriter) pos(pos token.Pos) { + if w.p.version >= iexportVersionPosCol { + w.posV1(pos) + } else { + w.posV0(pos) + } +} + +func (w *exportWriter) posV1(pos token.Pos) { + if w.p.fset == nil { + w.int64(0) + return + } + + p := w.p.fset.Position(pos) + file := p.Filename + line := int64(p.Line) + column := int64(p.Column) + + deltaColumn := (column - w.prevColumn) << 1 + deltaLine := (line - w.prevLine) << 1 + + if file != w.prevFile { + deltaLine |= 1 + } + if deltaLine != 0 { + deltaColumn |= 1 + } + + w.int64(deltaColumn) + if deltaColumn&1 != 0 { + w.int64(deltaLine) + if deltaLine&1 != 0 { + w.string(file) + } + } + + w.prevFile = file + w.prevLine = line + w.prevColumn = column +} + +func (w *exportWriter) posV0(pos token.Pos) { + if w.p.fset == nil { + w.int64(0) + return + } + + p := w.p.fset.Position(pos) + file := p.Filename + line := int64(p.Line) + + // When file is the same as the last position (common case), + // we can save a few bytes by delta encoding just the line + // number. + // + // Note: Because data objects may be read out of order (or not + // at all), we can only apply delta encoding within a single + // object. This is handled implicitly by tracking prevFile and + // prevLine as fields of exportWriter. + + if file == w.prevFile { + delta := line - w.prevLine + w.int64(delta) + if delta == deltaNewFile { + w.int64(-1) + } + } else { + w.int64(deltaNewFile) + w.int64(line) // line >= 0 + w.string(file) + w.prevFile = file + } + w.prevLine = line +} + +func (w *exportWriter) pkg(pkg *types.Package) { + // Ensure any referenced packages are declared in the main index. + w.p.allPkgs[pkg] = true + + w.string(w.exportPath(pkg)) +} + +func (w *exportWriter) qualifiedType(obj *types.TypeName) { + name := w.p.exportName(obj) + + // Ensure any referenced declarations are written out too. 
+ w.p.pushDecl(obj) + w.string(name) + w.pkg(obj.Pkg()) +} + +func (w *exportWriter) typ(t types.Type, pkg *types.Package) { + w.data.uint64(w.p.typOff(t, pkg)) +} + +func (p *iexporter) newWriter() *exportWriter { + return &exportWriter{p: p} +} + +func (w *exportWriter) flush() uint64 { + off := uint64(w.p.data0.Len()) + io.Copy(&w.p.data0, &w.data) + return off +} + +func (p *iexporter) typOff(t types.Type, pkg *types.Package) uint64 { + off, ok := p.typIndex[t] + if !ok { + w := p.newWriter() + w.doTyp(t, pkg) + off = predeclReserved + w.flush() + p.typIndex[t] = off + } + return off +} + +func (w *exportWriter) startType(k itag) { + w.data.uint64(uint64(k)) +} + +func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { + if trace { + w.p.trace("exporting type %s (%T)", t, t) + w.p.indent++ + defer func() { + w.p.indent-- + w.p.trace("=> %s", t) + }() + } + switch t := t.(type) { + case *types.Named: + if targs := typeparams.NamedTypeArgs(t); targs.Len() > 0 { + w.startType(instanceType) + // TODO(rfindley): investigate if this position is correct, and if it + // matters. + w.pos(t.Obj().Pos()) + w.typeList(targs, pkg) + w.typ(typeparams.NamedTypeOrigin(t), pkg) + return + } + w.startType(definedType) + w.qualifiedType(t.Obj()) + + case *typeparams.TypeParam: + w.startType(typeParamType) + w.qualifiedType(t.Obj()) + + case *types.Pointer: + w.startType(pointerType) + w.typ(t.Elem(), pkg) + + case *types.Slice: + w.startType(sliceType) + w.typ(t.Elem(), pkg) + + case *types.Array: + w.startType(arrayType) + w.uint64(uint64(t.Len())) + w.typ(t.Elem(), pkg) + + case *types.Chan: + w.startType(chanType) + // 1 RecvOnly; 2 SendOnly; 3 SendRecv + var dir uint64 + switch t.Dir() { + case types.RecvOnly: + dir = 1 + case types.SendOnly: + dir = 2 + case types.SendRecv: + dir = 3 + } + w.uint64(dir) + w.typ(t.Elem(), pkg) + + case *types.Map: + w.startType(mapType) + w.typ(t.Key(), pkg) + w.typ(t.Elem(), pkg) + + case *types.Signature: + w.startType(signatureType) + w.setPkg(pkg, true) + w.signature(t) + + case *types.Struct: + w.startType(structType) + n := t.NumFields() + if n > 0 { + w.setPkg(t.Field(0).Pkg(), true) // qualifying package for field objects + } else { + w.setPkg(pkg, true) + } + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + f := t.Field(i) + w.pos(f.Pos()) + w.string(f.Name()) // unexported fields implicitly qualified by prior setPkg + w.typ(f.Type(), pkg) + w.bool(f.Anonymous()) + w.string(t.Tag(i)) // note (or tag) + } + + case *types.Interface: + w.startType(interfaceType) + w.setPkg(pkg, true) + + n := t.NumEmbeddeds() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + ft := t.EmbeddedType(i) + tPkg := pkg + if named, _ := ft.(*types.Named); named != nil { + w.pos(named.Obj().Pos()) + } else { + w.pos(token.NoPos) + } + w.typ(ft, tPkg) + } + + n = t.NumExplicitMethods() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + m := t.ExplicitMethod(i) + w.pos(m.Pos()) + w.string(m.Name()) + sig, _ := m.Type().(*types.Signature) + w.signature(sig) + } + + case *typeparams.Union: + w.startType(unionType) + nt := t.Len() + w.uint64(uint64(nt)) + for i := 0; i < nt; i++ { + term := t.Term(i) + w.bool(term.Tilde()) + w.typ(term.Type(), pkg) + } + + default: + panic(internalErrorf("unexpected type: %v, %v", t, reflect.TypeOf(t))) + } +} + +func (w *exportWriter) setPkg(pkg *types.Package, write bool) { + if write { + w.pkg(pkg) + } + + w.currPkg = pkg +} + +func (w *exportWriter) signature(sig *types.Signature) { + w.paramList(sig.Params()) + w.paramList(sig.Results()) + if 
sig.Params().Len() > 0 { + w.bool(sig.Variadic()) + } +} + +func (w *exportWriter) typeList(ts *typeparams.TypeList, pkg *types.Package) { + w.uint64(uint64(ts.Len())) + for i := 0; i < ts.Len(); i++ { + w.typ(ts.At(i), pkg) + } +} + +func (w *exportWriter) tparamList(prefix string, list *typeparams.TypeParamList, pkg *types.Package) { + ll := uint64(list.Len()) + w.uint64(ll) + for i := 0; i < list.Len(); i++ { + tparam := list.At(i) + // Set the type parameter exportName before exporting its type. + exportName := tparamExportName(prefix, tparam) + w.p.tparamNames[tparam.Obj()] = exportName + w.typ(list.At(i), pkg) + } +} + +const blankMarker = "$" + +// tparamExportName returns the 'exported' name of a type parameter, which +// differs from its actual object name: it is prefixed with a qualifier, and +// blank type parameter names are disambiguated by their index in the type +// parameter list. +func tparamExportName(prefix string, tparam *typeparams.TypeParam) string { + assert(prefix != "") + name := tparam.Obj().Name() + if name == "_" { + name = blankMarker + strconv.Itoa(tparam.Index()) + } + return prefix + "." + name +} + +// tparamName returns the real name of a type parameter, after stripping its +// qualifying prefix and reverting blank-name encoding. See tparamExportName +// for details. +func tparamName(exportName string) string { + // Remove the "path" from the type param name that makes it unique. + ix := strings.LastIndex(exportName, ".") + if ix < 0 { + errorf("malformed type parameter export name %s: missing prefix", exportName) + } + name := exportName[ix+1:] + if strings.HasPrefix(name, blankMarker) { + return "_" + } + return name +} + +func (w *exportWriter) paramList(tup *types.Tuple) { + n := tup.Len() + w.uint64(uint64(n)) + for i := 0; i < n; i++ { + w.param(tup.At(i)) + } +} + +func (w *exportWriter) param(obj types.Object) { + w.pos(obj.Pos()) + w.localIdent(obj) + w.typ(obj.Type(), obj.Pkg()) +} + +func (w *exportWriter) value(typ types.Type, v constant.Value) { + w.typ(typ, nil) + if w.p.version >= iexportVersionGo1_18 { + w.int64(int64(v.Kind())) + } + + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { + case types.IsBoolean: + w.bool(constant.BoolVal(v)) + case types.IsInteger: + var i big.Int + if i64, exact := constant.Int64Val(v); exact { + i.SetInt64(i64) + } else if ui64, exact := constant.Uint64Val(v); exact { + i.SetUint64(ui64) + } else { + i.SetString(v.ExactString(), 10) + } + w.mpint(&i, typ) + case types.IsFloat: + f := constantToFloat(v) + w.mpfloat(f, typ) + case types.IsComplex: + w.mpfloat(constantToFloat(constant.Real(v)), typ) + w.mpfloat(constantToFloat(constant.Imag(v)), typ) + case types.IsString: + w.string(constant.StringVal(v)) + default: + if b.Kind() == types.Invalid { + // package contains type errors + break + } + panic(internalErrorf("unexpected type %v (%v)", typ, typ.Underlying())) + } +} + +// constantToFloat converts a constant.Value with kind constant.Float to a +// big.Float. +func constantToFloat(x constant.Value) *big.Float { + x = constant.ToFloat(x) + // Use the same floating-point precision (512) as cmd/compile + // (see Mpprec in cmd/compile/internal/gc/mpfloat.go). + const mpprec = 512 + var f big.Float + f.SetPrec(mpprec) + if v, exact := constant.Float64Val(x); exact { + // float64 + f.SetFloat64(v) + } else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int { + // TODO(gri): add big.Rat accessor to constant.Value. 
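// [Editorial aside, not part of the vendored file] The 512-bit precision
// chosen above mirrors the compiler's constant precision; the sketch below
// just illustrates how much more of 1/3 survives at 512 bits than at a
// float64's 53 bits:
f53 := new(big.Float).SetPrec(53).SetRat(big.NewRat(1, 3))
f512 := new(big.Float).SetPrec(512).SetRat(big.NewRat(1, 3))
fmt.Println(f53.Text('g', 25))  // only ~16 digits survive float64 precision
fmt.Println(f512.Text('g', 25)) // all printed digits are correct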
+ n := valueToRat(num) + d := valueToRat(denom) + f.SetRat(n.Quo(n, d)) + } else { + // Value too large to represent as a fraction => inaccessible. + // TODO(gri): add big.Float accessor to constant.Value. + _, ok := f.SetString(x.ExactString()) + assert(ok) + } + return &f +} + +// mpint exports a multi-precision integer. +// +// For unsigned types, small values are written out as a single +// byte. Larger values are written out as a length-prefixed big-endian +// byte string, where the length prefix is encoded as its complement. +// For example, bytes 0, 1, and 2 directly represent the integer +// values 0, 1, and 2; while bytes 255, 254, and 253 indicate a 1-, +// 2-, and 3-byte big-endian string follow. +// +// Encoding for signed types use the same general approach as for +// unsigned types, except small values use zig-zag encoding and the +// bottom bit of length prefix byte for large values is reserved as a +// sign bit. +// +// The exact boundary between small and large encodings varies +// according to the maximum number of bytes needed to encode a value +// of type typ. As a special case, 8-bit types are always encoded as a +// single byte. +// +// TODO(mdempsky): Is this level of complexity really worthwhile? +func (w *exportWriter) mpint(x *big.Int, typ types.Type) { + basic, ok := typ.Underlying().(*types.Basic) + if !ok { + panic(internalErrorf("unexpected type %v (%T)", typ.Underlying(), typ.Underlying())) + } + + signed, maxBytes := intSize(basic) + + negative := x.Sign() < 0 + if !signed && negative { + panic(internalErrorf("negative unsigned integer; type %v, value %v", typ, x)) + } + + b := x.Bytes() + if len(b) > 0 && b[0] == 0 { + panic(internalErrorf("leading zeros")) + } + if uint(len(b)) > maxBytes { + panic(internalErrorf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x)) + } + + maxSmall := 256 - maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + // Check if x can use small value encoding. + if len(b) <= 1 { + var ux uint + if len(b) == 1 { + ux = uint(b[0]) + } + if signed { + ux <<= 1 + if negative { + ux-- + } + } + if ux < maxSmall { + w.data.WriteByte(byte(ux)) + return + } + } + + n := 256 - uint(len(b)) + if signed { + n = 256 - 2*uint(len(b)) + if negative { + n |= 1 + } + } + if n < maxSmall || n >= 256 { + panic(internalErrorf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n)) + } + + w.data.WriteByte(byte(n)) + w.data.Write(b) +} + +// mpfloat exports a multi-precision floating point number. +// +// The number's value is decomposed into mantissa × 2**exponent, where +// mantissa is an integer. The value is written out as mantissa (as a +// multi-precision integer) and then the exponent, except exponent is +// omitted if mantissa is zero. +func (w *exportWriter) mpfloat(f *big.Float, typ types.Type) { + if f.IsInf() { + panic("infinite constant") + } + + // Break into f = mant × 2**exp, with 0.5 <= mant < 1. + var mant big.Float + exp := int64(f.MantExp(&mant)) + + // Scale so that mant is an integer. 
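// [Editorial aside, not part of the vendored file] Worked examples of the
// mpint scheme documented above, for a signed 8-byte type such as int64
// (so maxSmall = 256 - 16 = 240), using values of my own choosing:
//   3     -> one byte 0x06            (zig-zag: 3<<1)
//   -3    -> one byte 0x05            (zig-zag: 3<<1 - 1)
//   1000  -> 0xFC 0x03 0xE8           (length prefix 256 - 2*2 = 252, then big-endian bytes)
//   -1000 -> 0xFD 0x03 0xE8           (same, with the sign bit set in the prefix)
// A quick check of the small-value zig-zag half:
zig := func(x int64) byte {
    if x < 0 {
        return byte(uint64(-x)<<1 - 1)
    }
    return byte(uint64(x) << 1)
}
fmt.Println(zig(3), zig(-3)) // 6 5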
+ prec := mant.MinPrec() + mant.SetMantExp(&mant, int(prec)) + exp -= int64(prec) + + manti, acc := mant.Int(nil) + if acc != big.Exact { + panic(internalErrorf("mantissa scaling failed for %f (%s)", f, acc)) + } + w.mpint(manti, typ) + if manti.Sign() != 0 { + w.int64(exp) + } +} + +func (w *exportWriter) bool(b bool) bool { + var x uint64 + if b { + x = 1 + } + w.uint64(x) + return b +} + +func (w *exportWriter) int64(x int64) { w.data.int64(x) } +func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) } +func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) } + +func (w *exportWriter) localIdent(obj types.Object) { + // Anonymous parameters. + if obj == nil { + w.string("") + return + } + + name := obj.Name() + if name == "_" { + w.string("_") + return + } + + w.string(name) +} + +type intWriter struct { + bytes.Buffer +} + +func (w *intWriter) int64(x int64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutVarint(buf[:], x) + w.Write(buf[:n]) +} + +func (w *intWriter) uint64(x uint64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], x) + w.Write(buf[:n]) +} + +func assert(cond bool) { + if !cond { + panic("internal error: assertion failed") + } +} + +// The below is copied from go/src/cmd/compile/internal/gc/syntax.go. + +// objQueue is a FIFO queue of types.Object. The zero value of objQueue is +// a ready-to-use empty queue. +type objQueue struct { + ring []types.Object + head, tail int +} + +// empty returns true if q contains no Nodes. +func (q *objQueue) empty() bool { + return q.head == q.tail +} + +// pushTail appends n to the tail of the queue. +func (q *objQueue) pushTail(obj types.Object) { + if len(q.ring) == 0 { + q.ring = make([]types.Object, 16) + } else if q.head+len(q.ring) == q.tail { + // Grow the ring. + nring := make([]types.Object, len(q.ring)*2) + // Copy the old elements. + part := q.ring[q.head%len(q.ring):] + if q.tail-q.head <= len(part) { + part = part[:q.tail-q.head] + copy(nring, part) + } else { + pos := copy(nring, part) + copy(nring[pos:], q.ring[:q.tail%len(q.ring)]) + } + q.ring, q.head, q.tail = nring, 0, q.tail-q.head + } + + q.ring[q.tail%len(q.ring)] = obj + q.tail++ +} + +// popHead pops a node from the head of the queue. It panics if q is empty. +func (q *objQueue) popHead() types.Object { + if q.empty() { + panic("dequeue empty") + } + obj := q.ring[q.head%len(q.ring)] + q.head++ + return obj +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go new file mode 100644 index 0000000000000..a1c46965350b1 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -0,0 +1,898 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Indexed package import. +// See cmd/compile/internal/gc/iexport.go for the export data format. + +// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. 
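// [Editorial aside, not part of the vendored file] Before the import side
// begins below, a small usage sketch of the objQueue FIFO that closed out
// iexport.go above; the package and object names are made up:
var q objQueue
demo := types.NewPackage("example.org/demo", "demo")
q.pushTail(types.NewVar(token.NoPos, demo, "A", types.Typ[types.Int]))
q.pushTail(types.NewVar(token.NoPos, demo, "B", types.Typ[types.String]))
for !q.empty() {
    fmt.Print(q.popHead().Name(), " ") // prints A, then B
}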
+ +package gcimporter + +import ( + "bytes" + "encoding/binary" + "fmt" + "go/constant" + "go/token" + "go/types" + "io" + "math/big" + "sort" + "strings" + + "golang.org/x/tools/internal/typeparams" +) + +type intReader struct { + *bytes.Reader + path string +} + +func (r *intReader) int64() int64 { + i, err := binary.ReadVarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +func (r *intReader) uint64() uint64 { + i, err := binary.ReadUvarint(r.Reader) + if err != nil { + errorf("import %q: read varint error: %v", r.path, err) + } + return i +} + +// Keep this in sync with constants in iexport.go. +const ( + iexportVersionGo1_11 = 0 + iexportVersionPosCol = 1 + iexportVersionGo1_18 = 2 + iexportVersionGenerics = 2 + + iexportVersionCurrent = 2 +) + +type ident struct { + pkg *types.Package + name string +} + +const predeclReserved = 32 + +type itag uint64 + +const ( + // Types + definedType itag = iota + pointerType + sliceType + arrayType + chanType + mapType + signatureType + structType + interfaceType + typeParamType + instanceType + unionType +) + +// IImportData imports a package from the serialized package data +// and returns 0 and a reference to the package. +// If the export data version is not recognized or the format is otherwise +// compromised, an error is returned. +func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) { + pkgs, err := iimportCommon(fset, imports, data, false, path, nil) + if err != nil { + return 0, nil, err + } + return 0, pkgs[0], nil +} + +// IImportBundle imports a set of packages from the serialized package bundle. +func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) { + return iimportCommon(fset, imports, data, true, "", nil) +} + +func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data []byte, bundle bool, path string, insert InsertType) (pkgs []*types.Package, err error) { + const currentVersion = iexportVersionCurrent + version := int64(-1) + if !debug { + defer func() { + if e := recover(); e != nil { + if bundle { + err = fmt.Errorf("%v", e) + } else if version > currentVersion { + err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e) + } else { + err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e) + } + } + }() + } + + r := &intReader{bytes.NewReader(data), path} + + if bundle { + bundleVersion := r.uint64() + switch bundleVersion { + case bundleVersion: + default: + errorf("unknown bundle format version %d", bundleVersion) + } + } + + version = int64(r.uint64()) + switch version { + case iexportVersionGo1_18, iexportVersionPosCol, iexportVersionGo1_11: + default: + if version > iexportVersionGo1_18 { + errorf("unstable iexport format version %d, just rebuild compiler and std library", version) + } else { + errorf("unknown iexport format version %d", version) + } + } + + sLen := int64(r.uint64()) + dLen := int64(r.uint64()) + + whence, _ := r.Seek(0, io.SeekCurrent) + stringData := data[whence : whence+sLen] + declData := data[whence+sLen : whence+sLen+dLen] + r.Seek(sLen+dLen, io.SeekCurrent) + + p := iimporter{ + version: int(version), + ipath: path, + insert: insert, + + stringData: stringData, + stringCache: make(map[uint64]string), + pkgCache: make(map[uint64]*types.Package), + + declData: declData, + pkgIndex: 
make(map[*types.Package]map[string]uint64), + typCache: make(map[uint64]types.Type), + // Separate map for typeparams, keyed by their package and unique + // name. + tparamIndex: make(map[ident]types.Type), + + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*fileInfo), + }, + } + defer p.fake.setLines() // set lines for files in fset + + for i, pt := range predeclared() { + p.typCache[uint64(i)] = pt + } + + pkgList := make([]*types.Package, r.uint64()) + for i := range pkgList { + pkgPathOff := r.uint64() + pkgPath := p.stringAt(pkgPathOff) + pkgName := p.stringAt(r.uint64()) + _ = r.uint64() // package height; unused by go/types + + if pkgPath == "" { + pkgPath = path + } + pkg := imports[pkgPath] + if pkg == nil { + pkg = types.NewPackage(pkgPath, pkgName) + imports[pkgPath] = pkg + } else if pkg.Name() != pkgName { + errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path) + } + if i == 0 && !bundle { + p.localpkg = pkg + } + + p.pkgCache[pkgPathOff] = pkg + + // Read index for package. + nameIndex := make(map[string]uint64) + nSyms := r.uint64() + // In shallow mode we don't expect an index for other packages. + assert(nSyms == 0 || p.localpkg == pkg || p.insert == nil) + for ; nSyms > 0; nSyms-- { + name := p.stringAt(r.uint64()) + nameIndex[name] = r.uint64() + } + + p.pkgIndex[pkg] = nameIndex + pkgList[i] = pkg + } + + if bundle { + pkgs = make([]*types.Package, r.uint64()) + for i := range pkgs { + pkg := p.pkgAt(r.uint64()) + imps := make([]*types.Package, r.uint64()) + for j := range imps { + imps[j] = p.pkgAt(r.uint64()) + } + pkg.SetImports(imps) + pkgs[i] = pkg + } + } else { + if len(pkgList) == 0 { + errorf("no packages found for %s", path) + panic("unreachable") + } + pkgs = pkgList[:1] + + // record all referenced packages as imports + list := append(([]*types.Package)(nil), pkgList[1:]...) + sort.Sort(byPath(list)) + pkgs[0].SetImports(list) + } + + for _, pkg := range pkgs { + if pkg.Complete() { + continue + } + + names := make([]string, 0, len(p.pkgIndex[pkg])) + for name := range p.pkgIndex[pkg] { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + p.doDecl(pkg, name) + } + + // package was imported completely and without errors + pkg.MarkComplete() + } + + // SetConstraint can't be called if the constraint type is not yet complete. + // When type params are created in the 'P' case of (*importReader).obj(), + // the associated constraint type may not be complete due to recursion. + // Therefore, we defer calling SetConstraint there, and call it here instead + // after all types are complete. 
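// [Editorial aside, not part of the vendored file] Plain go/types (1.18+)
// states the same two-phase rule: SetConstraint may only be called once the
// constraint type is fully set up. A hedged sketch with a made-up type
// parameter P constrained by comparable:
tn := types.NewTypeName(token.NoPos, nil, "P", nil)
tp := types.NewTypeParam(tn, nil) // constraint deliberately left nil for now
c := types.NewInterfaceType(nil, []types.Type{types.Universe.Lookup("comparable").Type()})
c.Complete()
tp.SetConstraint(c) // safe only now that c is complete
fmt.Println(tp.Obj().Name(), tp.Constraint() != nil) // P true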
+ for _, d := range p.later { + typeparams.SetTypeParamConstraint(d.t, d.constraint) + } + + for _, typ := range p.interfaceList { + typ.Complete() + } + + return pkgs, nil +} + +type setConstraintArgs struct { + t *typeparams.TypeParam + constraint types.Type +} + +type iimporter struct { + version int + ipath string + + localpkg *types.Package + insert func(pkg *types.Package, name string) // "shallow" mode only + + stringData []byte + stringCache map[uint64]string + pkgCache map[uint64]*types.Package + + declData []byte + pkgIndex map[*types.Package]map[string]uint64 + typCache map[uint64]types.Type + tparamIndex map[ident]types.Type + + fake fakeFileSet + interfaceList []*types.Interface + + // Arguments for calls to SetConstraint that are deferred due to recursive types + later []setConstraintArgs + + indent int // for tracing support +} + +func (p *iimporter) trace(format string, args ...interface{}) { + if !trace { + // Call sites should also be guarded, but having this check here allows + // easily enabling/disabling debug trace statements. + return + } + fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...) +} + +func (p *iimporter) doDecl(pkg *types.Package, name string) { + if debug { + p.trace("import decl %s", name) + p.indent++ + defer func() { + p.indent-- + p.trace("=> %s", name) + }() + } + // See if we've already imported this declaration. + if obj := pkg.Scope().Lookup(name); obj != nil { + return + } + + off, ok := p.pkgIndex[pkg][name] + if !ok { + // In "shallow" mode, call back to the application to + // find the object and insert it into the package scope. + if p.insert != nil { + assert(pkg != p.localpkg) + p.insert(pkg, name) // "can't fail" + return + } + errorf("%v.%v not in index", pkg, name) + } + + r := &importReader{p: p, currPkg: pkg} + r.declReader.Reset(p.declData[off:]) + + r.obj(name) +} + +func (p *iimporter) stringAt(off uint64) string { + if s, ok := p.stringCache[off]; ok { + return s + } + + slen, n := binary.Uvarint(p.stringData[off:]) + if n <= 0 { + errorf("varint failed") + } + spos := off + uint64(n) + s := string(p.stringData[spos : spos+slen]) + p.stringCache[off] = s + return s +} + +func (p *iimporter) pkgAt(off uint64) *types.Package { + if pkg, ok := p.pkgCache[off]; ok { + return pkg + } + path := p.stringAt(off) + errorf("missing package %q in %q", path, p.ipath) + return nil +} + +func (p *iimporter) typAt(off uint64, base *types.Named) types.Type { + if t, ok := p.typCache[off]; ok && canReuse(base, t) { + return t + } + + if off < predeclReserved { + errorf("predeclared type missing from cache: %v", off) + } + + r := &importReader{p: p} + r.declReader.Reset(p.declData[off-predeclReserved:]) + t := r.doType(base) + + if canReuse(base, t) { + p.typCache[off] = t + } + return t +} + +// canReuse reports whether the type rhs on the RHS of the declaration for def +// may be re-used. +// +// Specifically, if def is non-nil and rhs is an interface type with methods, it +// may not be re-used because we have a convention of setting the receiver type +// for interface methods to def. +func canReuse(def *types.Named, rhs types.Type) bool { + if def == nil { + return true + } + iface, _ := rhs.(*types.Interface) + if iface == nil { + return true + } + // Don't use iface.Empty() here as iface may not be complete. 
+ return iface.NumEmbeddeds() == 0 && iface.NumExplicitMethods() == 0 +} + +type importReader struct { + p *iimporter + declReader bytes.Reader + currPkg *types.Package + prevFile string + prevLine int64 + prevColumn int64 +} + +func (r *importReader) obj(name string) { + tag := r.byte() + pos := r.pos() + + switch tag { + case 'A': + typ := r.typ() + + r.declare(types.NewTypeName(pos, r.currPkg, name, typ)) + + case 'C': + typ, val := r.value() + + r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) + + case 'F', 'G': + var tparams []*typeparams.TypeParam + if tag == 'G' { + tparams = r.tparamList() + } + sig := r.signature(nil, nil, tparams) + r.declare(types.NewFunc(pos, r.currPkg, name, sig)) + + case 'T', 'U': + // Types can be recursive. We need to setup a stub + // declaration before recursing. + obj := types.NewTypeName(pos, r.currPkg, name, nil) + named := types.NewNamed(obj, nil, nil) + // Declare obj before calling r.tparamList, so the new type name is recognized + // if used in the constraint of one of its own typeparams (see #48280). + r.declare(obj) + if tag == 'U' { + tparams := r.tparamList() + typeparams.SetForNamed(named, tparams) + } + + underlying := r.p.typAt(r.uint64(), named).Underlying() + named.SetUnderlying(underlying) + + if !isInterface(underlying) { + for n := r.uint64(); n > 0; n-- { + mpos := r.pos() + mname := r.ident() + recv := r.param() + + // If the receiver has any targs, set those as the + // rparams of the method (since those are the + // typeparams being used in the method sig/body). + base := baseType(recv.Type()) + assert(base != nil) + targs := typeparams.NamedTypeArgs(base) + var rparams []*typeparams.TypeParam + if targs.Len() > 0 { + rparams = make([]*typeparams.TypeParam, targs.Len()) + for i := range rparams { + rparams[i] = targs.At(i).(*typeparams.TypeParam) + } + } + msig := r.signature(recv, rparams, nil) + + named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig)) + } + } + + case 'P': + // We need to "declare" a typeparam in order to have a name that + // can be referenced recursively (if needed) in the type param's + // bound. + if r.p.version < iexportVersionGenerics { + errorf("unexpected type param type") + } + name0 := tparamName(name) + tn := types.NewTypeName(pos, r.currPkg, name0, nil) + t := typeparams.NewTypeParam(tn, nil) + + // To handle recursive references to the typeparam within its + // bound, save the partial type in tparamIndex before reading the bounds. + id := ident{r.currPkg, name} + r.p.tparamIndex[id] = t + var implicit bool + if r.p.version >= iexportVersionGo1_18 { + implicit = r.bool() + } + constraint := r.typ() + if implicit { + iface, _ := constraint.(*types.Interface) + if iface == nil { + errorf("non-interface constraint marked implicit") + } + typeparams.MarkImplicit(iface) + } + // The constraint type may not be complete, if we + // are in the middle of a type recursion involving type + // constraints. So, we defer SetConstraint until we have + // completely set up all types in ImportData. + r.p.later = append(r.p.later, setConstraintArgs{t: t, constraint: constraint}) + + case 'V': + typ := r.typ() + + r.declare(types.NewVar(pos, r.currPkg, name, typ)) + + default: + errorf("unexpected tag: %v", tag) + } +} + +func (r *importReader) declare(obj types.Object) { + obj.Pkg().Scope().Insert(obj) +} + +func (r *importReader) value() (typ types.Type, val constant.Value) { + typ = r.typ() + if r.p.version >= iexportVersionGo1_18 { + // TODO: add support for using the kind. 
+ _ = constant.Kind(r.int64()) + } + + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { + case types.IsBoolean: + val = constant.MakeBool(r.bool()) + + case types.IsString: + val = constant.MakeString(r.string()) + + case types.IsInteger: + var x big.Int + r.mpint(&x, b) + val = constant.Make(&x) + + case types.IsFloat: + val = r.mpfloat(b) + + case types.IsComplex: + re := r.mpfloat(b) + im := r.mpfloat(b) + val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im)) + + default: + if b.Kind() == types.Invalid { + val = constant.MakeUnknown() + return + } + errorf("unexpected type %v", typ) // panics + panic("unreachable") + } + + return +} + +func intSize(b *types.Basic) (signed bool, maxBytes uint) { + if (b.Info() & types.IsUntyped) != 0 { + return true, 64 + } + + switch b.Kind() { + case types.Float32, types.Complex64: + return true, 3 + case types.Float64, types.Complex128: + return true, 7 + } + + signed = (b.Info() & types.IsUnsigned) == 0 + switch b.Kind() { + case types.Int8, types.Uint8: + maxBytes = 1 + case types.Int16, types.Uint16: + maxBytes = 2 + case types.Int32, types.Uint32: + maxBytes = 4 + default: + maxBytes = 8 + } + + return +} + +func (r *importReader) mpint(x *big.Int, typ *types.Basic) { + signed, maxBytes := intSize(typ) + + maxSmall := 256 - maxBytes + if signed { + maxSmall = 256 - 2*maxBytes + } + if maxBytes == 1 { + maxSmall = 256 + } + + n, _ := r.declReader.ReadByte() + if uint(n) < maxSmall { + v := int64(n) + if signed { + v >>= 1 + if n&1 != 0 { + v = ^v + } + } + x.SetInt64(v) + return + } + + v := -n + if signed { + v = -(n &^ 1) >> 1 + } + if v < 1 || uint(v) > maxBytes { + errorf("weird decoding: %v, %v => %v", n, signed, v) + } + b := make([]byte, v) + io.ReadFull(&r.declReader, b) + x.SetBytes(b) + if signed && n&1 != 0 { + x.Neg(x) + } +} + +func (r *importReader) mpfloat(typ *types.Basic) constant.Value { + var mant big.Int + r.mpint(&mant, typ) + var f big.Float + f.SetInt(&mant) + if f.Sign() != 0 { + f.SetMantExp(&f, int(r.int64())) + } + return constant.Make(&f) +} + +func (r *importReader) ident() string { + return r.string() +} + +func (r *importReader) qualifiedIdent() (*types.Package, string) { + name := r.string() + pkg := r.pkg() + return pkg, name +} + +func (r *importReader) pos() token.Pos { + if r.p.version >= iexportVersionPosCol { + r.posv1() + } else { + r.posv0() + } + + if r.prevFile == "" && r.prevLine == 0 && r.prevColumn == 0 { + return token.NoPos + } + return r.p.fake.pos(r.prevFile, int(r.prevLine), int(r.prevColumn)) +} + +func (r *importReader) posv0() { + delta := r.int64() + if delta != deltaNewFile { + r.prevLine += delta + } else if l := r.int64(); l == -1 { + r.prevLine += deltaNewFile + } else { + r.prevFile = r.string() + r.prevLine = l + } +} + +func (r *importReader) posv1() { + delta := r.int64() + r.prevColumn += delta >> 1 + if delta&1 != 0 { + delta = r.int64() + r.prevLine += delta >> 1 + if delta&1 != 0 { + r.prevFile = r.string() + } + } +} + +func (r *importReader) typ() types.Type { + return r.p.typAt(r.uint64(), nil) +} + +func isInterface(t types.Type) bool { + _, ok := t.(*types.Interface) + return ok +} + +func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) } +func (r *importReader) string() string { return r.p.stringAt(r.uint64()) } + +func (r *importReader) doType(base *types.Named) (res types.Type) { + k := r.kind() + if debug { + r.p.trace("importing type %d (base: %s)", k, base) + r.p.indent++ + defer func() { + r.p.indent-- + 
r.p.trace("=> %s", res) + }() + } + switch k { + default: + errorf("unexpected kind tag in %q: %v", r.p.ipath, k) + return nil + + case definedType: + pkg, name := r.qualifiedIdent() + r.p.doDecl(pkg, name) + return pkg.Scope().Lookup(name).(*types.TypeName).Type() + case pointerType: + return types.NewPointer(r.typ()) + case sliceType: + return types.NewSlice(r.typ()) + case arrayType: + n := r.uint64() + return types.NewArray(r.typ(), int64(n)) + case chanType: + dir := chanDir(int(r.uint64())) + return types.NewChan(dir, r.typ()) + case mapType: + return types.NewMap(r.typ(), r.typ()) + case signatureType: + r.currPkg = r.pkg() + return r.signature(nil, nil, nil) + + case structType: + r.currPkg = r.pkg() + + fields := make([]*types.Var, r.uint64()) + tags := make([]string, len(fields)) + for i := range fields { + fpos := r.pos() + fname := r.ident() + ftyp := r.typ() + emb := r.bool() + tag := r.string() + + fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb) + tags[i] = tag + } + return types.NewStruct(fields, tags) + + case interfaceType: + r.currPkg = r.pkg() + + embeddeds := make([]types.Type, r.uint64()) + for i := range embeddeds { + _ = r.pos() + embeddeds[i] = r.typ() + } + + methods := make([]*types.Func, r.uint64()) + for i := range methods { + mpos := r.pos() + mname := r.ident() + + // TODO(mdempsky): Matches bimport.go, but I + // don't agree with this. + var recv *types.Var + if base != nil { + recv = types.NewVar(token.NoPos, r.currPkg, "", base) + } + + msig := r.signature(recv, nil, nil) + methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig) + } + + typ := newInterface(methods, embeddeds) + r.p.interfaceList = append(r.p.interfaceList, typ) + return typ + + case typeParamType: + if r.p.version < iexportVersionGenerics { + errorf("unexpected type param type") + } + pkg, name := r.qualifiedIdent() + id := ident{pkg, name} + if t, ok := r.p.tparamIndex[id]; ok { + // We're already in the process of importing this typeparam. + return t + } + // Otherwise, import the definition of the typeparam now. + r.p.doDecl(pkg, name) + return r.p.tparamIndex[id] + + case instanceType: + if r.p.version < iexportVersionGenerics { + errorf("unexpected instantiation type") + } + // pos does not matter for instances: they are positioned on the original + // type. + _ = r.pos() + len := r.uint64() + targs := make([]types.Type, len) + for i := range targs { + targs[i] = r.typ() + } + baseType := r.typ() + // The imported instantiated type doesn't include any methods, so + // we must always use the methods of the base (orig) type. 
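// [Editorial sketch, not part of the vendored file] The call just below goes through the
// package's internal typeparams.Instantiate wrapper. On Go 1.18+ the exported go/types
// API performs the same operation; this is a rough, assumed illustration with made-up
// names, not the importer's actual code path.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	const src = `package p
type Pair[K comparable, V any] struct {
	Key K
	Val V
}
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}

	// Instantiate the generic named type Pair with concrete type arguments.
	orig := pkg.Scope().Lookup("Pair").Type()
	inst, err := types.Instantiate(types.NewContext(), orig, []types.Type{
		types.Typ[types.String], types.Typ[types.Int],
	}, true)
	if err != nil {
		panic(err)
	}
	fmt.Println(inst) // p.Pair[string, int]
}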
+ // TODO provide a non-nil *Environment + t, _ := typeparams.Instantiate(nil, baseType, targs, false) + return t + + case unionType: + if r.p.version < iexportVersionGenerics { + errorf("unexpected instantiation type") + } + terms := make([]*typeparams.Term, r.uint64()) + for i := range terms { + terms[i] = typeparams.NewTerm(r.bool(), r.typ()) + } + return typeparams.NewUnion(terms) + } +} + +func (r *importReader) kind() itag { + return itag(r.uint64()) +} + +func (r *importReader) signature(recv *types.Var, rparams []*typeparams.TypeParam, tparams []*typeparams.TypeParam) *types.Signature { + params := r.paramList() + results := r.paramList() + variadic := params.Len() > 0 && r.bool() + return typeparams.NewSignatureType(recv, rparams, tparams, params, results, variadic) +} + +func (r *importReader) tparamList() []*typeparams.TypeParam { + n := r.uint64() + if n == 0 { + return nil + } + xs := make([]*typeparams.TypeParam, n) + for i := range xs { + // Note: the standard library importer is tolerant of nil types here, + // though would panic in SetTypeParams. + xs[i] = r.typ().(*typeparams.TypeParam) + } + return xs +} + +func (r *importReader) paramList() *types.Tuple { + xs := make([]*types.Var, r.uint64()) + for i := range xs { + xs[i] = r.param() + } + return types.NewTuple(xs...) +} + +func (r *importReader) param() *types.Var { + pos := r.pos() + name := r.ident() + typ := r.typ() + return types.NewParam(pos, r.currPkg, name, typ) +} + +func (r *importReader) bool() bool { + return r.uint64() != 0 +} + +func (r *importReader) int64() int64 { + n, err := binary.ReadVarint(&r.declReader) + if err != nil { + errorf("readVarint: %v", err) + } + return n +} + +func (r *importReader) uint64() uint64 { + n, err := binary.ReadUvarint(&r.declReader) + if err != nil { + errorf("readUvarint: %v", err) + } + return n +} + +func (r *importReader) byte() byte { + x, err := r.declReader.ReadByte() + if err != nil { + errorf("declReader.ReadByte: %v", err) + } + return x +} + +func baseType(typ types.Type) *types.Named { + // pointer receivers are never types.Named types + if p, _ := typ.(*types.Pointer); p != nil { + typ = p.Elem() + } + // receiver base types are always (possibly generic) types.Named types + n, _ := typ.(*types.Named) + return n +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go new file mode 100644 index 0000000000000..8b163e3d058ae --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go @@ -0,0 +1,22 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.11 +// +build !go1.11 + +package gcimporter + +import "go/types" + +func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { + named := make([]*types.Named, len(embeddeds)) + for i, e := range embeddeds { + var ok bool + named[i], ok = e.(*types.Named) + if !ok { + panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11") + } + } + return types.NewInterface(methods, named) +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go new file mode 100644 index 0000000000000..49984f40fd80a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go @@ -0,0 +1,14 @@ +// Copyright 2018 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.11 +// +build go1.11 + +package gcimporter + +import "go/types" + +func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { + return types.NewInterfaceType(methods, embeddeds) +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go new file mode 100644 index 0000000000000..d892273efb614 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go @@ -0,0 +1,16 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.18 +// +build !go1.18 + +package gcimporter + +import "go/types" + +const iexportVersion = iexportVersionGo1_11 + +func additionalPredeclared() []types.Type { + return nil +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go new file mode 100644 index 0000000000000..edbe6ea7041db --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go @@ -0,0 +1,37 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package gcimporter + +import "go/types" + +const iexportVersion = iexportVersionGenerics + +// additionalPredeclared returns additional predeclared types in go.1.18. +func additionalPredeclared() []types.Type { + return []types.Type{ + // comparable + types.Universe.Lookup("comparable").Type(), + + // any + types.Universe.Lookup("any").Type(), + } +} + +// See cmd/compile/internal/types.SplitVargenSuffix. +func splitVargenSuffix(name string) (base, suffix string) { + i := len(name) + for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' { + i-- + } + const dot = "·" + if i >= len(dot) && name[i-len(dot):i] == dot { + i -= len(dot) + return name[:i], name[i:] + } + return name, "" +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go new file mode 100644 index 0000000000000..286bf445483d8 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go @@ -0,0 +1,10 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !(go1.18 && goexperiment.unified) +// +build !go1.18 !goexperiment.unified + +package gcimporter + +const unifiedIR = false diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go new file mode 100644 index 0000000000000..b5d69ffbe682d --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go @@ -0,0 +1,10 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.18 && goexperiment.unified +// +build go1.18,goexperiment.unified + +package gcimporter + +const unifiedIR = true diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go new file mode 100644 index 0000000000000..8eb20729c2ad5 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go @@ -0,0 +1,19 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.18 +// +build !go1.18 + +package gcimporter + +import ( + "fmt" + "go/token" + "go/types" +) + +func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + err = fmt.Errorf("go/tools compiled with a Go version earlier than 1.18 cannot read unified IR export data") + return +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go new file mode 100644 index 0000000000000..b285a11ce25a6 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -0,0 +1,738 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Derived from go/internal/gcimporter/ureader.go + +//go:build go1.18 +// +build go1.18 + +package gcimporter + +import ( + "go/token" + "go/types" + "strings" + + "golang.org/x/tools/internal/pkgbits" +) + +// A pkgReader holds the shared state for reading a unified IR package +// description. +type pkgReader struct { + pkgbits.PkgDecoder + + fake fakeFileSet + + ctxt *types.Context + imports map[string]*types.Package // previously imported packages, indexed by path + + // lazily initialized arrays corresponding to the unified IR + // PosBase, Pkg, and Type sections, respectively. + posBases []string // position bases (i.e., file names) + pkgs []*types.Package + typs []types.Type + + // laterFns holds functions that need to be invoked at the end of + // import reading. + laterFns []func() + // laterFors is used in case of 'type A B' to ensure that B is processed before A. + laterFors map[types.Type]int + + // ifaces holds a list of constructed Interfaces, which need to have + // Complete called after importing is done. + ifaces []*types.Interface +} + +// later adds a function to be invoked at the end of import reading. +func (pr *pkgReader) later(fn func()) { + pr.laterFns = append(pr.laterFns, fn) +} + +// See cmd/compile/internal/noder.derivedInfo. +type derivedInfo struct { + idx pkgbits.Index + needed bool +} + +// See cmd/compile/internal/noder.typeInfo. +type typeInfo struct { + idx pkgbits.Index + derived bool +} + +func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { + s := string(data) + s = s[:strings.LastIndex(s, "\n$$\n")] + input := pkgbits.NewPkgDecoder(path, s) + pkg = readUnifiedPackage(fset, nil, imports, input) + return +} + +// laterFor adds a function to be invoked at the end of import reading, and records the type that function is finishing. 
+func (pr *pkgReader) laterFor(t types.Type, fn func()) { + if pr.laterFors == nil { + pr.laterFors = make(map[types.Type]int) + } + pr.laterFors[t] = len(pr.laterFns) + pr.laterFns = append(pr.laterFns, fn) +} + +// readUnifiedPackage reads a package description from the given +// unified IR export data decoder. +func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[string]*types.Package, input pkgbits.PkgDecoder) *types.Package { + pr := pkgReader{ + PkgDecoder: input, + + fake: fakeFileSet{ + fset: fset, + files: make(map[string]*fileInfo), + }, + + ctxt: ctxt, + imports: imports, + + posBases: make([]string, input.NumElems(pkgbits.RelocPosBase)), + pkgs: make([]*types.Package, input.NumElems(pkgbits.RelocPkg)), + typs: make([]types.Type, input.NumElems(pkgbits.RelocType)), + } + defer pr.fake.setLines() + + r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic) + pkg := r.pkg() + r.Bool() // has init + + for i, n := 0, r.Len(); i < n; i++ { + // As if r.obj(), but avoiding the Scope.Lookup call, + // to avoid eager loading of imports. + r.Sync(pkgbits.SyncObject) + assert(!r.Bool()) + r.p.objIdx(r.Reloc(pkgbits.RelocObj)) + assert(r.Len() == 0) + } + + r.Sync(pkgbits.SyncEOF) + + for _, fn := range pr.laterFns { + fn() + } + + for _, iface := range pr.ifaces { + iface.Complete() + } + + pkg.MarkComplete() + return pkg +} + +// A reader holds the state for reading a single unified IR element +// within a package. +type reader struct { + pkgbits.Decoder + + p *pkgReader + + dict *readerDict +} + +// A readerDict holds the state for type parameters that parameterize +// the current unified IR element. +type readerDict struct { + // bounds is a slice of typeInfos corresponding to the underlying + // bounds of the element's type parameters. + bounds []typeInfo + + // tparams is a slice of the constructed TypeParams for the element. + tparams []*types.TypeParam + + // devived is a slice of types derived from tparams, which may be + // instantiated while reading the current element. + derived []derivedInfo + derivedTypes []types.Type // lazily instantiated from derived +} + +func (pr *pkgReader) newReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader { + return &reader{ + Decoder: pr.NewDecoder(k, idx, marker), + p: pr, + } +} + +func (pr *pkgReader) tempReader(k pkgbits.RelocKind, idx pkgbits.Index, marker pkgbits.SyncMarker) *reader { + return &reader{ + Decoder: pr.TempDecoder(k, idx, marker), + p: pr, + } +} + +func (pr *pkgReader) retireReader(r *reader) { + pr.RetireDecoder(&r.Decoder) +} + +// @@@ Positions + +func (r *reader) pos() token.Pos { + r.Sync(pkgbits.SyncPos) + if !r.Bool() { + return token.NoPos + } + + // TODO(mdempsky): Delta encoding. + posBase := r.posBase() + line := r.Uint() + col := r.Uint() + return r.p.fake.pos(posBase, int(line), int(col)) +} + +func (r *reader) posBase() string { + return r.p.posBaseIdx(r.Reloc(pkgbits.RelocPosBase)) +} + +func (pr *pkgReader) posBaseIdx(idx pkgbits.Index) string { + if b := pr.posBases[idx]; b != "" { + return b + } + + var filename string + { + r := pr.tempReader(pkgbits.RelocPosBase, idx, pkgbits.SyncPosBase) + + // Within types2, position bases have a lot more details (e.g., + // keeping track of where //line directives appeared exactly). + // + // For go/types, we just track the file name. 
+ + filename = r.String() + + if r.Bool() { // file base + // Was: "b = token.NewTrimmedFileBase(filename, true)" + } else { // line base + pos := r.pos() + line := r.Uint() + col := r.Uint() + + // Was: "b = token.NewLineBase(pos, filename, true, line, col)" + _, _, _ = pos, line, col + } + pr.retireReader(r) + } + b := filename + pr.posBases[idx] = b + return b +} + +// @@@ Packages + +func (r *reader) pkg() *types.Package { + r.Sync(pkgbits.SyncPkg) + return r.p.pkgIdx(r.Reloc(pkgbits.RelocPkg)) +} + +func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package { + // TODO(mdempsky): Consider using some non-nil pointer to indicate + // the universe scope, so we don't need to keep re-reading it. + if pkg := pr.pkgs[idx]; pkg != nil { + return pkg + } + + pkg := pr.newReader(pkgbits.RelocPkg, idx, pkgbits.SyncPkgDef).doPkg() + pr.pkgs[idx] = pkg + return pkg +} + +func (r *reader) doPkg() *types.Package { + path := r.String() + switch path { + case "": + path = r.p.PkgPath() + case "builtin": + return nil // universe + case "unsafe": + return types.Unsafe + } + + if pkg := r.p.imports[path]; pkg != nil { + return pkg + } + + name := r.String() + + pkg := types.NewPackage(path, name) + r.p.imports[path] = pkg + + imports := make([]*types.Package, r.Len()) + for i := range imports { + imports[i] = r.pkg() + } + pkg.SetImports(flattenImports(imports)) + + return pkg +} + +// flattenImports returns the transitive closure of all imported +// packages rooted from pkgs. +func flattenImports(pkgs []*types.Package) []*types.Package { + var res []*types.Package + seen := make(map[*types.Package]struct{}) + for _, pkg := range pkgs { + if _, ok := seen[pkg]; ok { + continue + } + seen[pkg] = struct{}{} + res = append(res, pkg) + + // pkg.Imports() is already flattened. + for _, pkg := range pkg.Imports() { + if _, ok := seen[pkg]; ok { + continue + } + seen[pkg] = struct{}{} + res = append(res, pkg) + } + } + return res +} + +// @@@ Types + +func (r *reader) typ() types.Type { + return r.p.typIdx(r.typInfo(), r.dict) +} + +func (r *reader) typInfo() typeInfo { + r.Sync(pkgbits.SyncType) + if r.Bool() { + return typeInfo{idx: pkgbits.Index(r.Len()), derived: true} + } + return typeInfo{idx: r.Reloc(pkgbits.RelocType), derived: false} +} + +func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict) types.Type { + idx := info.idx + var where *types.Type + if info.derived { + where = &dict.derivedTypes[idx] + idx = dict.derived[idx].idx + } else { + where = &pr.typs[idx] + } + + if typ := *where; typ != nil { + return typ + } + + var typ types.Type + { + r := pr.tempReader(pkgbits.RelocType, idx, pkgbits.SyncTypeIdx) + r.dict = dict + + typ = r.doTyp() + assert(typ != nil) + pr.retireReader(r) + } + // See comment in pkgReader.typIdx explaining how this happens. 
+ if prev := *where; prev != nil { + return prev + } + + *where = typ + return typ +} + +func (r *reader) doTyp() (res types.Type) { + switch tag := pkgbits.CodeType(r.Code(pkgbits.SyncType)); tag { + default: + errorf("unhandled type tag: %v", tag) + panic("unreachable") + + case pkgbits.TypeBasic: + return types.Typ[r.Len()] + + case pkgbits.TypeNamed: + obj, targs := r.obj() + name := obj.(*types.TypeName) + if len(targs) != 0 { + t, _ := types.Instantiate(r.p.ctxt, name.Type(), targs, false) + return t + } + return name.Type() + + case pkgbits.TypeTypeParam: + return r.dict.tparams[r.Len()] + + case pkgbits.TypeArray: + len := int64(r.Uint64()) + return types.NewArray(r.typ(), len) + case pkgbits.TypeChan: + dir := types.ChanDir(r.Len()) + return types.NewChan(dir, r.typ()) + case pkgbits.TypeMap: + return types.NewMap(r.typ(), r.typ()) + case pkgbits.TypePointer: + return types.NewPointer(r.typ()) + case pkgbits.TypeSignature: + return r.signature(nil, nil, nil) + case pkgbits.TypeSlice: + return types.NewSlice(r.typ()) + case pkgbits.TypeStruct: + return r.structType() + case pkgbits.TypeInterface: + return r.interfaceType() + case pkgbits.TypeUnion: + return r.unionType() + } +} + +func (r *reader) structType() *types.Struct { + fields := make([]*types.Var, r.Len()) + var tags []string + for i := range fields { + pos := r.pos() + pkg, name := r.selector() + ftyp := r.typ() + tag := r.String() + embedded := r.Bool() + + fields[i] = types.NewField(pos, pkg, name, ftyp, embedded) + if tag != "" { + for len(tags) < i { + tags = append(tags, "") + } + tags = append(tags, tag) + } + } + return types.NewStruct(fields, tags) +} + +func (r *reader) unionType() *types.Union { + terms := make([]*types.Term, r.Len()) + for i := range terms { + terms[i] = types.NewTerm(r.Bool(), r.typ()) + } + return types.NewUnion(terms) +} + +func (r *reader) interfaceType() *types.Interface { + methods := make([]*types.Func, r.Len()) + embeddeds := make([]types.Type, r.Len()) + implicit := len(methods) == 0 && len(embeddeds) == 1 && r.Bool() + + for i := range methods { + pos := r.pos() + pkg, name := r.selector() + mtyp := r.signature(nil, nil, nil) + methods[i] = types.NewFunc(pos, pkg, name, mtyp) + } + + for i := range embeddeds { + embeddeds[i] = r.typ() + } + + iface := types.NewInterfaceType(methods, embeddeds) + if implicit { + iface.MarkImplicit() + } + + // We need to call iface.Complete(), but if there are any embedded + // defined types, then we may not have set their underlying + // interface type yet. So we need to defer calling Complete until + // after we've called SetUnderlying everywhere. + // + // TODO(mdempsky): After CL 424876 lands, it should be safe to call + // iface.Complete() immediately. + r.p.ifaces = append(r.p.ifaces, iface) + + return iface +} + +func (r *reader) signature(recv *types.Var, rtparams, tparams []*types.TypeParam) *types.Signature { + r.Sync(pkgbits.SyncSignature) + + params := r.params() + results := r.params() + variadic := r.Bool() + + return types.NewSignatureType(recv, rtparams, tparams, params, results, variadic) +} + +func (r *reader) params() *types.Tuple { + r.Sync(pkgbits.SyncParams) + + params := make([]*types.Var, r.Len()) + for i := range params { + params[i] = r.param() + } + + return types.NewTuple(params...) 
+} + +func (r *reader) param() *types.Var { + r.Sync(pkgbits.SyncParam) + + pos := r.pos() + pkg, name := r.localIdent() + typ := r.typ() + + return types.NewParam(pos, pkg, name, typ) +} + +// @@@ Objects + +func (r *reader) obj() (types.Object, []types.Type) { + r.Sync(pkgbits.SyncObject) + + assert(!r.Bool()) + + pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj)) + obj := pkgScope(pkg).Lookup(name) + + targs := make([]types.Type, r.Len()) + for i := range targs { + targs[i] = r.typ() + } + + return obj, targs +} + +func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { + + var objPkg *types.Package + var objName string + var tag pkgbits.CodeObj + { + rname := pr.tempReader(pkgbits.RelocName, idx, pkgbits.SyncObject1) + + objPkg, objName = rname.qualifiedIdent() + assert(objName != "") + + tag = pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj)) + pr.retireReader(rname) + } + + if tag == pkgbits.ObjStub { + assert(objPkg == nil || objPkg == types.Unsafe) + return objPkg, objName + } + + // Ignore local types promoted to global scope (#55110). + if _, suffix := splitVargenSuffix(objName); suffix != "" { + return objPkg, objName + } + + if objPkg.Scope().Lookup(objName) == nil { + dict := pr.objDictIdx(idx) + + r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1) + r.dict = dict + + declare := func(obj types.Object) { + objPkg.Scope().Insert(obj) + } + + switch tag { + default: + panic("weird") + + case pkgbits.ObjAlias: + pos := r.pos() + typ := r.typ() + declare(types.NewTypeName(pos, objPkg, objName, typ)) + + case pkgbits.ObjConst: + pos := r.pos() + typ := r.typ() + val := r.Value() + declare(types.NewConst(pos, objPkg, objName, typ, val)) + + case pkgbits.ObjFunc: + pos := r.pos() + tparams := r.typeParamNames() + sig := r.signature(nil, nil, tparams) + declare(types.NewFunc(pos, objPkg, objName, sig)) + + case pkgbits.ObjType: + pos := r.pos() + + obj := types.NewTypeName(pos, objPkg, objName, nil) + named := types.NewNamed(obj, nil, nil) + declare(obj) + + named.SetTypeParams(r.typeParamNames()) + + setUnderlying := func(underlying types.Type) { + // If the underlying type is an interface, we need to + // duplicate its methods so we can replace the receiver + // parameter's type (#49906). + if iface, ok := underlying.(*types.Interface); ok && iface.NumExplicitMethods() != 0 { + methods := make([]*types.Func, iface.NumExplicitMethods()) + for i := range methods { + fn := iface.ExplicitMethod(i) + sig := fn.Type().(*types.Signature) + + recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named) + methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic())) + } + + embeds := make([]types.Type, iface.NumEmbeddeds()) + for i := range embeds { + embeds[i] = iface.EmbeddedType(i) + } + + newIface := types.NewInterfaceType(methods, embeds) + r.p.ifaces = append(r.p.ifaces, newIface) + underlying = newIface + } + + named.SetUnderlying(underlying) + } + + // Since go.dev/cl/455279, we can assume rhs.Underlying() will + // always be non-nil. However, to temporarily support users of + // older snapshot releases, we continue to fallback to the old + // behavior for now. + // + // TODO(mdempsky): Remove fallback code and simplify after + // allowing time for snapshot users to upgrade. 
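// [Editorial sketch, not part of the vendored file] The ObjType case above follows a
// two-step pattern: create the TypeName/Named pair first (so recursive references can
// resolve), then fill in the underlying type. A rough, assumed illustration of that
// pattern using the exported go/types API and made-up names:

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	pkg := types.NewPackage("example.com/p", "p")

	// Step 1: declare the named type with a nil underlying type.
	obj := types.NewTypeName(token.NoPos, pkg, "Node", nil)
	named := types.NewNamed(obj, nil, nil)
	pkg.Scope().Insert(obj)

	// Step 2: build an underlying type that refers back to the named type,
	// then attach it. This is only possible because Node already exists.
	underlying := types.NewStruct([]*types.Var{
		types.NewField(token.NoPos, pkg, "next", types.NewPointer(named), false),
	}, nil)
	named.SetUnderlying(underlying)

	fmt.Println(types.TypeString(named.Underlying(), types.RelativeTo(pkg)))
	// prints: struct{next *Node}
}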
+ rhs := r.typ() + if underlying := rhs.Underlying(); underlying != nil { + setUnderlying(underlying) + } else { + pk := r.p + pk.laterFor(named, func() { + // First be sure that the rhs is initialized, if it needs to be initialized. + delete(pk.laterFors, named) // prevent cycles + if i, ok := pk.laterFors[rhs]; ok { + f := pk.laterFns[i] + pk.laterFns[i] = func() {} // function is running now, so replace it with a no-op + f() // initialize RHS + } + setUnderlying(rhs.Underlying()) + }) + } + + for i, n := 0, r.Len(); i < n; i++ { + named.AddMethod(r.method()) + } + + case pkgbits.ObjVar: + pos := r.pos() + typ := r.typ() + declare(types.NewVar(pos, objPkg, objName, typ)) + } + } + + return objPkg, objName +} + +func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict { + + var dict readerDict + + { + r := pr.tempReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1) + if implicits := r.Len(); implicits != 0 { + errorf("unexpected object with %v implicit type parameter(s)", implicits) + } + + dict.bounds = make([]typeInfo, r.Len()) + for i := range dict.bounds { + dict.bounds[i] = r.typInfo() + } + + dict.derived = make([]derivedInfo, r.Len()) + dict.derivedTypes = make([]types.Type, len(dict.derived)) + for i := range dict.derived { + dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()} + } + + pr.retireReader(r) + } + // function references follow, but reader doesn't need those + + return &dict +} + +func (r *reader) typeParamNames() []*types.TypeParam { + r.Sync(pkgbits.SyncTypeParamNames) + + // Note: This code assumes it only processes objects without + // implement type parameters. This is currently fine, because + // reader is only used to read in exported declarations, which are + // always package scoped. + + if len(r.dict.bounds) == 0 { + return nil + } + + // Careful: Type parameter lists may have cycles. To allow for this, + // we construct the type parameter list in two passes: first we + // create all the TypeNames and TypeParams, then we construct and + // set the bound type. + + r.dict.tparams = make([]*types.TypeParam, len(r.dict.bounds)) + for i := range r.dict.bounds { + pos := r.pos() + pkg, name := r.localIdent() + + tname := types.NewTypeName(pos, pkg, name, nil) + r.dict.tparams[i] = types.NewTypeParam(tname, nil) + } + + typs := make([]types.Type, len(r.dict.bounds)) + for i, bound := range r.dict.bounds { + typs[i] = r.p.typIdx(bound, r.dict) + } + + // TODO(mdempsky): This is subtle, elaborate further. + // + // We have to save tparams outside of the closure, because + // typeParamNames() can be called multiple times with the same + // dictionary instance. + // + // Also, this needs to happen later to make sure SetUnderlying has + // been called. + // + // TODO(mdempsky): Is it safe to have a single "later" slice or do + // we need to have multiple passes? See comments on CL 386002 and + // go.dev/issue/52104. + tparams := r.dict.tparams + r.p.later(func() { + for i, typ := range typs { + tparams[i].SetConstraint(typ) + } + }) + + return r.dict.tparams +} + +func (r *reader) method() *types.Func { + r.Sync(pkgbits.SyncMethod) + pos := r.pos() + pkg, name := r.selector() + + rparams := r.typeParamNames() + sig := r.signature(r.param(), rparams, nil) + + _ = r.pos() // TODO(mdempsky): Remove; this is a hacker for linker.go. 
+ return types.NewFunc(pos, pkg, name, sig) +} + +func (r *reader) qualifiedIdent() (*types.Package, string) { return r.ident(pkgbits.SyncSym) } +func (r *reader) localIdent() (*types.Package, string) { return r.ident(pkgbits.SyncLocalIdent) } +func (r *reader) selector() (*types.Package, string) { return r.ident(pkgbits.SyncSelector) } + +func (r *reader) ident(marker pkgbits.SyncMarker) (*types.Package, string) { + r.Sync(marker) + return r.pkg(), r.String() +} + +// pkgScope returns pkg.Scope(). +// If pkg is nil, it returns types.Universe instead. +// +// TODO(mdempsky): Remove after x/tools can depend on Go 1.19. +func pkgScope(pkg *types.Package) *types.Scope { + if pkg != nil { + return pkg.Scope() + } + return types.Universe +} diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go new file mode 100644 index 0000000000000..d50551693f3dc --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -0,0 +1,356 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gocommand is a helper for calling the go command. +package gocommand + +import ( + "bytes" + "context" + "fmt" + "io" + "log" + "os" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" + + exec "golang.org/x/sys/execabs" + + "golang.org/x/tools/internal/event" +) + +// An Runner will run go command invocations and serialize +// them if it sees a concurrency error. +type Runner struct { + // once guards the runner initialization. + once sync.Once + + // inFlight tracks available workers. + inFlight chan struct{} + + // serialized guards the ability to run a go command serially, + // to avoid deadlocks when claiming workers. + serialized chan struct{} +} + +const maxInFlight = 10 + +func (runner *Runner) initialize() { + runner.once.Do(func() { + runner.inFlight = make(chan struct{}, maxInFlight) + runner.serialized = make(chan struct{}, 1) + }) +} + +// 1.13: go: updates to go.mod needed, but contents have changed +// 1.14: go: updating go.mod: existing contents have changed since last read +var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`) + +// Run is a convenience wrapper around RunRaw. +// It returns only stdout and a "friendly" error. +func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) { + stdout, _, friendly, _ := runner.RunRaw(ctx, inv) + return stdout, friendly +} + +// RunPiped runs the invocation serially, always waiting for any concurrent +// invocations to complete first. +func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error { + _, err := runner.runPiped(ctx, inv, stdout, stderr) + return err +} + +// RunRaw runs the invocation, serializing requests only if they fight over +// go.mod changes. +func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { + // Make sure the runner is always initialized. + runner.initialize() + + // First, try to run the go command concurrently. + stdout, stderr, friendlyErr, err := runner.runConcurrent(ctx, inv) + + // If we encounter a load concurrency error, we need to retry serially. 
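// [Editorial sketch, not part of the vendored file] A rough illustration of how callers
// inside x/tools drive Runner via the Run/Invocation API defined above. The zero value
// is usable because initialization is lazy; the package is internal, so this only builds
// within the golang.org/x/tools module, and the directory value is made up. The
// serial-retry logic referred to in the comment above continues below.

package example

import (
	"bytes"
	"context"

	"golang.org/x/tools/internal/gocommand"
)

func listModules(ctx context.Context, dir string) (*bytes.Buffer, error) {
	var runner gocommand.Runner
	return runner.Run(ctx, gocommand.Invocation{
		Verb:       "list",
		Args:       []string{"-m", "-json", "all"},
		WorkingDir: dir,
	})
}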
+ if friendlyErr == nil || !modConcurrencyError.MatchString(friendlyErr.Error()) { + return stdout, stderr, friendlyErr, err + } + event.Error(ctx, "Load concurrency error, will retry serially", err) + + // Run serially by calling runPiped. + stdout.Reset() + stderr.Reset() + friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr) + return stdout, stderr, friendlyErr, err +} + +func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) { + // Wait for 1 worker to become available. + select { + case <-ctx.Done(): + return nil, nil, nil, ctx.Err() + case runner.inFlight <- struct{}{}: + defer func() { <-runner.inFlight }() + } + + stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{} + friendlyErr, err := inv.runWithFriendlyError(ctx, stdout, stderr) + return stdout, stderr, friendlyErr, err +} + +func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) { + // Make sure the runner is always initialized. + runner.initialize() + + // Acquire the serialization lock. This avoids deadlocks between two + // runPiped commands. + select { + case <-ctx.Done(): + return nil, ctx.Err() + case runner.serialized <- struct{}{}: + defer func() { <-runner.serialized }() + } + + // Wait for all in-progress go commands to return before proceeding, + // to avoid load concurrency errors. + for i := 0; i < maxInFlight; i++ { + select { + case <-ctx.Done(): + return nil, ctx.Err() + case runner.inFlight <- struct{}{}: + // Make sure we always "return" any workers we took. + defer func() { <-runner.inFlight }() + } + } + + return inv.runWithFriendlyError(ctx, stdout, stderr) +} + +// An Invocation represents a call to the go command. +type Invocation struct { + Verb string + Args []string + BuildFlags []string + + // If ModFlag is set, the go command is invoked with -mod=ModFlag. + ModFlag string + + // If ModFile is set, the go command is invoked with -modfile=ModFile. + ModFile string + + // If Overlay is set, the go command is invoked with -overlay=Overlay. + Overlay string + + // If CleanEnv is set, the invocation will run only with the environment + // in Env, not starting with os.Environ. + CleanEnv bool + Env []string + WorkingDir string + Logf func(format string, args ...interface{}) +} + +func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io.Writer) (friendlyError error, rawError error) { + rawError = i.run(ctx, stdout, stderr) + if rawError != nil { + friendlyError = rawError + // Check for 'go' executable not being found. + if ee, ok := rawError.(*exec.Error); ok && ee.Err == exec.ErrNotFound { + friendlyError = fmt.Errorf("go command required, not found: %v", ee) + } + if ctx.Err() != nil { + friendlyError = ctx.Err() + } + friendlyError = fmt.Errorf("err: %v: stderr: %s", friendlyError, stderr) + } + return +} + +func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { + log := i.Logf + if log == nil { + log = func(string, ...interface{}) {} + } + + goArgs := []string{i.Verb} + + appendModFile := func() { + if i.ModFile != "" { + goArgs = append(goArgs, "-modfile="+i.ModFile) + } + } + appendModFlag := func() { + if i.ModFlag != "" { + goArgs = append(goArgs, "-mod="+i.ModFlag) + } + } + appendOverlayFlag := func() { + if i.Overlay != "" { + goArgs = append(goArgs, "-overlay="+i.Overlay) + } + } + + switch i.Verb { + case "env", "version": + goArgs = append(goArgs, i.Args...) + case "mod": + // mod needs the sub-verb before flags. 
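// [Editorial note, not part of the vendored file] Illustrative argument order produced
// by this switch for two hypothetical invocations:
//
//	Invocation{Verb: "mod", Args: []string{"tidy"}, ModFile: "tmp.go.mod"}
//	    => go mod tidy -modfile=tmp.go.mod   (sub-verb first, then flags)
//
//	Invocation{Verb: "list", Args: []string{"./..."}, ModFlag: "vendor"}
//	    => go list -mod=vendor ./...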
+ goArgs = append(goArgs, i.Args[0]) + appendModFile() + goArgs = append(goArgs, i.Args[1:]...) + case "get": + goArgs = append(goArgs, i.BuildFlags...) + appendModFile() + goArgs = append(goArgs, i.Args...) + + default: // notably list and build. + goArgs = append(goArgs, i.BuildFlags...) + appendModFile() + appendModFlag() + appendOverlayFlag() + goArgs = append(goArgs, i.Args...) + } + cmd := exec.Command("go", goArgs...) + cmd.Stdout = stdout + cmd.Stderr = stderr + // On darwin the cwd gets resolved to the real path, which breaks anything that + // expects the working directory to keep the original path, including the + // go command when dealing with modules. + // The Go stdlib has a special feature where if the cwd and the PWD are the + // same node then it trusts the PWD, so by setting it in the env for the child + // process we fix up all the paths returned by the go command. + if !i.CleanEnv { + cmd.Env = os.Environ() + } + cmd.Env = append(cmd.Env, i.Env...) + if i.WorkingDir != "" { + cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir) + cmd.Dir = i.WorkingDir + } + defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now()) + + return runCmdContext(ctx, cmd) +} + +// DebugHangingGoCommands may be set by tests to enable additional +// instrumentation (including panics) for debugging hanging Go commands. +// +// See golang/go#54461 for details. +var DebugHangingGoCommands = false + +// runCmdContext is like exec.CommandContext except it sends os.Interrupt +// before os.Kill. +func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { + if err := cmd.Start(); err != nil { + return err + } + resChan := make(chan error, 1) + go func() { + resChan <- cmd.Wait() + }() + + // If we're interested in debugging hanging Go commands, stop waiting after a + // minute and panic with interesting information. + if DebugHangingGoCommands { + select { + case err := <-resChan: + return err + case <-time.After(1 * time.Minute): + HandleHangingGoCommand(cmd.Process) + case <-ctx.Done(): + } + } else { + select { + case err := <-resChan: + return err + case <-ctx.Done(): + } + } + + // Cancelled. Interrupt and see if it ends voluntarily. + cmd.Process.Signal(os.Interrupt) + select { + case err := <-resChan: + return err + case <-time.After(time.Second): + } + + // Didn't shut down in response to interrupt. Kill it hard. + // TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT + // on certain platforms, such as unix. + if err := cmd.Process.Kill(); err != nil && DebugHangingGoCommands { + // Don't panic here as this reliably fails on windows with EINVAL. + log.Printf("error killing the Go command: %v", err) + } + + // See above: don't wait indefinitely if we're debugging hanging Go commands. + if DebugHangingGoCommands { + select { + case err := <-resChan: + return err + case <-time.After(10 * time.Second): // a shorter wait as resChan should return quickly following Kill + HandleHangingGoCommand(cmd.Process) + } + } + return <-resChan +} + +func HandleHangingGoCommand(proc *os.Process) { + switch runtime.GOOS { + case "linux", "darwin", "freebsd", "netbsd": + fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND + +The gopls test runner has detected a hanging go command. In order to debug +this, the output of ps and lsof/fstat is printed below. 
+ +See golang/go#54461 for more details.`) + + fmt.Fprintln(os.Stderr, "\nps axo ppid,pid,command:") + fmt.Fprintln(os.Stderr, "-------------------------") + psCmd := exec.Command("ps", "axo", "ppid,pid,command") + psCmd.Stdout = os.Stderr + psCmd.Stderr = os.Stderr + if err := psCmd.Run(); err != nil { + panic(fmt.Sprintf("running ps: %v", err)) + } + + listFiles := "lsof" + if runtime.GOOS == "freebsd" || runtime.GOOS == "netbsd" { + listFiles = "fstat" + } + + fmt.Fprintln(os.Stderr, "\n"+listFiles+":") + fmt.Fprintln(os.Stderr, "-----") + listFilesCmd := exec.Command(listFiles) + listFilesCmd.Stdout = os.Stderr + listFilesCmd.Stderr = os.Stderr + if err := listFilesCmd.Run(); err != nil { + panic(fmt.Sprintf("running %s: %v", listFiles, err)) + } + } + panic(fmt.Sprintf("detected hanging go command (pid %d): see golang/go#54461 for more details", proc.Pid)) +} + +func cmdDebugStr(cmd *exec.Cmd) string { + env := make(map[string]string) + for _, kv := range cmd.Env { + split := strings.SplitN(kv, "=", 2) + if len(split) == 2 { + k, v := split[0], split[1] + env[k] = v + } + } + + var args []string + for _, arg := range cmd.Args { + quoted := strconv.Quote(arg) + if quoted[1:len(quoted)-1] != arg || strings.Contains(arg, " ") { + args = append(args, quoted) + } else { + args = append(args, arg) + } + } + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], strings.Join(args, " ")) +} diff --git a/vendor/golang.org/x/tools/internal/gocommand/vendor.go b/vendor/golang.org/x/tools/internal/gocommand/vendor.go new file mode 100644 index 0000000000000..2d3d408c0bed3 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/vendor.go @@ -0,0 +1,109 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocommand + +import ( + "bytes" + "context" + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + "time" + + "golang.org/x/mod/semver" +) + +// ModuleJSON holds information about a module. +type ModuleJSON struct { + Path string // module path + Version string // module version + Versions []string // available module versions (with -versions) + Replace *ModuleJSON // replaced by this module + Time *time.Time // time version was created + Update *ModuleJSON // available update, if any (with -u) + Main bool // is this the main module? + Indirect bool // is this module only an indirect dependency of main module? + Dir string // directory holding files for this module, if any + GoMod string // path to go.mod file used when loading this module, if any + GoVersion string // go version used in module +} + +var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`) + +// VendorEnabled reports whether vendoring is enabled. It takes a *Runner to execute Go commands +// with the supplied context.Context and Invocation. The Invocation can contain pre-defined fields, +// of which only Verb and Args are modified to run the appropriate Go command. +// Inspired by setDefaultBuildMod in modload/init.go +func VendorEnabled(ctx context.Context, inv Invocation, r *Runner) (bool, *ModuleJSON, error) { + mainMod, go114, err := getMainModuleAnd114(ctx, inv, r) + if err != nil { + return false, nil, err + } + + // We check the GOFLAGS to see if there is anything overridden or not. 
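// [Editorial sketch, not part of the vendored file] How modFlagRegexp picks an explicit
// -mod setting out of GOFLAGS; the sample values below are made up. A "vendor" match
// short-circuits to vendoring on, any other match turns automatic detection off, and no
// match falls through to the go 1.14 vendor-directory checks further down.

package main

import (
	"fmt"
	"regexp"
)

var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`)

func main() {
	for _, goflags := range []string{"-mod=vendor -trimpath", "-mod=mod", ""} {
		m := modFlagRegexp.FindStringSubmatch(goflags)
		if len(m) != 0 {
			fmt.Printf("%q => -mod is %q\n", goflags, m[1])
		} else {
			fmt.Printf("%q => no explicit -mod flag\n", goflags)
		}
	}
}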
+ inv.Verb = "env" + inv.Args = []string{"GOFLAGS"} + stdout, err := r.Run(ctx, inv) + if err != nil { + return false, nil, err + } + goflags := string(bytes.TrimSpace(stdout.Bytes())) + matches := modFlagRegexp.FindStringSubmatch(goflags) + var modFlag string + if len(matches) != 0 { + modFlag = matches[1] + } + // Don't override an explicit '-mod=' argument. + if modFlag == "vendor" { + return true, mainMod, nil + } else if modFlag != "" { + return false, nil, nil + } + if mainMod == nil || !go114 { + return false, nil, nil + } + // Check 1.14's automatic vendor mode. + if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() { + if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 { + // The Go version is at least 1.14, and a vendor directory exists. + // Set -mod=vendor by default. + return true, mainMod, nil + } + } + return false, nil, nil +} + +// getMainModuleAnd114 gets one of the main modules' information and whether the +// go command in use is 1.14+. This is the information needed to figure out +// if vendoring should be enabled. +func getMainModuleAnd114(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) { + const format = `{{.Path}} +{{.Dir}} +{{.GoMod}} +{{.GoVersion}} +{{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}} +` + inv.Verb = "list" + inv.Args = []string{"-m", "-f", format} + stdout, err := r.Run(ctx, inv) + if err != nil { + return nil, false, err + } + + lines := strings.Split(stdout.String(), "\n") + if len(lines) < 5 { + return nil, false, fmt.Errorf("unexpected stdout: %q", stdout.String()) + } + mod := &ModuleJSON{ + Path: lines[0], + Dir: lines[1], + GoMod: lines[2], + GoVersion: lines[3], + Main: true, + } + return mod, lines[4] == "go1.14", nil +} diff --git a/vendor/golang.org/x/tools/internal/gocommand/version.go b/vendor/golang.org/x/tools/internal/gocommand/version.go new file mode 100644 index 0000000000000..307a76d474ad1 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/version.go @@ -0,0 +1,81 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gocommand + +import ( + "context" + "fmt" + "regexp" + "strings" +) + +// GoVersion reports the minor version number of the highest release +// tag built into the go command on the PATH. +// +// Note that this may be higher than the version of the go tool used +// to build this application, and thus the versions of the standard +// go/{scanner,parser,ast,types} packages that are linked into it. +// In that case, callers should either downgrade to the version of +// go used to build the application, or report an error that the +// application is too old to use the go command on the PATH. +func GoVersion(ctx context.Context, inv Invocation, r *Runner) (int, error) { + inv.Verb = "list" + inv.Args = []string{"-e", "-f", `{{context.ReleaseTags}}`, `--`, `unsafe`} + inv.Env = append(append([]string{}, inv.Env...), "GO111MODULE=off") + // Unset any unneeded flags, and remove them from BuildFlags, if they're + // present. + inv.ModFile = "" + inv.ModFlag = "" + var buildFlags []string + for _, flag := range inv.BuildFlags { + // Flags can be prefixed by one or two dashes. 
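// [Editorial note, not part of the vendored file] Example of the flag stripping that
// follows, with made-up flag values:
//
//	"-mod=vendor"     -> trimmed to "mod=vendor"     -> dropped
//	"--modfile=a.mod" -> trimmed to "modfile=a.mod"  -> dropped
//	"-trimpath"       -> trimmed to "trimpath"       -> kept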
+ f := strings.TrimPrefix(strings.TrimPrefix(flag, "-"), "-") + if strings.HasPrefix(f, "mod=") || strings.HasPrefix(f, "modfile=") { + continue + } + buildFlags = append(buildFlags, flag) + } + inv.BuildFlags = buildFlags + stdoutBytes, err := r.Run(ctx, inv) + if err != nil { + return 0, err + } + stdout := stdoutBytes.String() + if len(stdout) < 3 { + return 0, fmt.Errorf("bad ReleaseTags output: %q", stdout) + } + // Split up "[go1.1 go1.15]" and return highest go1.X value. + tags := strings.Fields(stdout[1 : len(stdout)-2]) + for i := len(tags) - 1; i >= 0; i-- { + var version int + if _, err := fmt.Sscanf(tags[i], "go1.%d", &version); err != nil { + continue + } + return version, nil + } + return 0, fmt.Errorf("no parseable ReleaseTags in %v", tags) +} + +// GoVersionOutput returns the complete output of the go version command. +func GoVersionOutput(ctx context.Context, inv Invocation, r *Runner) (string, error) { + inv.Verb = "version" + goVersion, err := r.Run(ctx, inv) + if err != nil { + return "", err + } + return goVersion.String(), nil +} + +// ParseGoVersionOutput extracts the Go version string +// from the output of the "go version" command. +// Given an unrecognized form, it returns an empty string. +func ParseGoVersionOutput(data string) string { + re := regexp.MustCompile(`^go version (go\S+|devel \S+)`) + m := re.FindStringSubmatch(data) + if len(m) != 2 { + return "" // unrecognized version + } + return m[1] +} diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go new file mode 100644 index 0000000000000..d9950b1f0bef9 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -0,0 +1,30 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packagesinternal exposes internal-only fields from go/packages. +package packagesinternal + +import ( + "golang.org/x/tools/internal/gocommand" +) + +var GetForTest = func(p interface{}) string { return "" } +var GetDepsErrors = func(p interface{}) []*PackageError { return nil } + +type PackageError struct { + ImportStack []string // shortest path from package named on command line to this one + Pos string // position of error (if present, file:line:col) + Err string // the error itself +} + +var GetGoCmdRunner = func(config interface{}) *gocommand.Runner { return nil } + +var SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {} + +var TypecheckCgo int +var DepsErrors int // must be set as a LoadMode to call GetDepsErrors +var ForTest int // must be set as a LoadMode to call GetForTest + +var SetModFlag = func(config interface{}, value string) {} +var SetModFile = func(config interface{}, value string) {} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/codes.go b/vendor/golang.org/x/tools/internal/pkgbits/codes.go new file mode 100644 index 0000000000000..f0cabde96eba9 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/pkgbits/codes.go @@ -0,0 +1,77 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +// A Code is an enum value that can be encoded into bitstreams. +// +// Code types are preferable for enum types, because they allow +// Decoder to detect desyncs. 
+type Code interface { + // Marker returns the SyncMarker for the Code's dynamic type. + Marker() SyncMarker + + // Value returns the Code's ordinal value. + Value() int +} + +// A CodeVal distinguishes among go/constant.Value encodings. +type CodeVal int + +func (c CodeVal) Marker() SyncMarker { return SyncVal } +func (c CodeVal) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + ValBool CodeVal = iota + ValString + ValInt64 + ValBigInt + ValBigRat + ValBigFloat +) + +// A CodeType distinguishes among go/types.Type encodings. +type CodeType int + +func (c CodeType) Marker() SyncMarker { return SyncType } +func (c CodeType) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + TypeBasic CodeType = iota + TypeNamed + TypePointer + TypeSlice + TypeArray + TypeChan + TypeMap + TypeSignature + TypeStruct + TypeInterface + TypeUnion + TypeTypeParam +) + +// A CodeObj distinguishes among go/types.Object encodings. +type CodeObj int + +func (c CodeObj) Marker() SyncMarker { return SyncCodeObj } +func (c CodeObj) Value() int { return int(c) } + +// Note: These values are public and cannot be changed without +// updating the go/types importers. + +const ( + ObjAlias CodeObj = iota + ObjConst + ObjType + ObjFunc + ObjVar + ObjStub +) diff --git a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go new file mode 100644 index 0000000000000..3205c9a16c568 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go @@ -0,0 +1,517 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "encoding/binary" + "errors" + "fmt" + "go/constant" + "go/token" + "io" + "math/big" + "os" + "runtime" + "strings" +) + +// A PkgDecoder provides methods for decoding a package's Unified IR +// export data. +type PkgDecoder struct { + // version is the file format version. + version uint32 + + // sync indicates whether the file uses sync markers. + sync bool + + // pkgPath is the package path for the package to be decoded. + // + // TODO(mdempsky): Remove; unneeded since CL 391014. + pkgPath string + + // elemData is the full data payload of the encoded package. + // Elements are densely and contiguously packed together. + // + // The last 8 bytes of elemData are the package fingerprint. + elemData string + + // elemEnds stores the byte-offset end positions of element + // bitstreams within elemData. + // + // For example, element I's bitstream data starts at elemEnds[I-1] + // (or 0, if I==0) and ends at elemEnds[I]. + // + // Note: elemEnds is indexed by absolute indices, not + // section-relative indices. + elemEnds []uint32 + + // elemEndsEnds stores the index-offset end positions of relocation + // sections within elemEnds. + // + // For example, section K's end positions start at elemEndsEnds[K-1] + // (or 0, if K==0) and end at elemEndsEnds[K]. + elemEndsEnds [numRelocs]uint32 + + scratchRelocEnt []RelocEnt +} + +// PkgPath returns the package path for the package +// +// TODO(mdempsky): Remove; unneeded since CL 391014. +func (pr *PkgDecoder) PkgPath() string { return pr.pkgPath } + +// SyncMarkers reports whether pr uses sync markers. 
+func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync } + +// NewPkgDecoder returns a PkgDecoder initialized to read the Unified +// IR export data from input. pkgPath is the package path for the +// compilation unit that produced the export data. +// +// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014. +func NewPkgDecoder(pkgPath, input string) PkgDecoder { + pr := PkgDecoder{ + pkgPath: pkgPath, + } + + // TODO(mdempsky): Implement direct indexing of input string to + // avoid copying the position information. + + r := strings.NewReader(input) + + assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil) + + switch pr.version { + default: + panic(fmt.Errorf("unsupported version: %v", pr.version)) + case 0: + // no flags + case 1: + var flags uint32 + assert(binary.Read(r, binary.LittleEndian, &flags) == nil) + pr.sync = flags&flagSyncMarkers != 0 + } + + assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil) + + pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1]) + assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil) + + pos, err := r.Seek(0, io.SeekCurrent) + assert(err == nil) + + pr.elemData = input[pos:] + assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1])) + + return pr +} + +// NumElems returns the number of elements in section k. +func (pr *PkgDecoder) NumElems(k RelocKind) int { + count := int(pr.elemEndsEnds[k]) + if k > 0 { + count -= int(pr.elemEndsEnds[k-1]) + } + return count +} + +// TotalElems returns the total number of elements across all sections. +func (pr *PkgDecoder) TotalElems() int { + return len(pr.elemEnds) +} + +// Fingerprint returns the package fingerprint. +func (pr *PkgDecoder) Fingerprint() [8]byte { + var fp [8]byte + copy(fp[:], pr.elemData[len(pr.elemData)-8:]) + return fp +} + +// AbsIdx returns the absolute index for the given (section, index) +// pair. +func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int { + absIdx := int(idx) + if k > 0 { + absIdx += int(pr.elemEndsEnds[k-1]) + } + if absIdx >= int(pr.elemEndsEnds[k]) { + errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) + } + return absIdx +} + +// DataIdx returns the raw element bitstream for the given (section, +// index) pair. +func (pr *PkgDecoder) DataIdx(k RelocKind, idx Index) string { + absIdx := pr.AbsIdx(k, idx) + + var start uint32 + if absIdx > 0 { + start = pr.elemEnds[absIdx-1] + } + end := pr.elemEnds[absIdx] + + return pr.elemData[start:end] +} + +// StringIdx returns the string value for the given string index. +func (pr *PkgDecoder) StringIdx(idx Index) string { + return pr.DataIdx(RelocString, idx) +} + +// NewDecoder returns a Decoder for the given (section, index) pair, +// and decodes the given SyncMarker from the element bitstream. +func (pr *PkgDecoder) NewDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder { + r := pr.NewDecoderRaw(k, idx) + r.Sync(marker) + return r +} + +// TempDecoder returns a Decoder for the given (section, index) pair, +// and decodes the given SyncMarker from the element bitstream. +// If possible the Decoder should be RetireDecoder'd when it is no longer +// needed, this will avoid heap allocations. +func (pr *PkgDecoder) TempDecoder(k RelocKind, idx Index, marker SyncMarker) Decoder { + r := pr.TempDecoderRaw(k, idx) + r.Sync(marker) + return r +} + +func (pr *PkgDecoder) RetireDecoder(d *Decoder) { + pr.scratchRelocEnt = d.Relocs + d.Relocs = nil +} + +// NewDecoderRaw returns a Decoder for the given (section, index) pair. 
+//
+// Most callers should use NewDecoder instead.
+func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder {
+	r := Decoder{
+		common: pr,
+		k:      k,
+		Idx:    idx,
+	}
+
+	// TODO(mdempsky) r.data.Reset(...) after #44505 is resolved.
+	r.Data = *strings.NewReader(pr.DataIdx(k, idx))
+
+	r.Sync(SyncRelocs)
+	r.Relocs = make([]RelocEnt, r.Len())
+	for i := range r.Relocs {
+		r.Sync(SyncReloc)
+		r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())}
+	}
+
+	return r
+}
+
+func (pr *PkgDecoder) TempDecoderRaw(k RelocKind, idx Index) Decoder {
+	r := Decoder{
+		common: pr,
+		k:      k,
+		Idx:    idx,
+	}
+
+	r.Data.Reset(pr.DataIdx(k, idx))
+	r.Sync(SyncRelocs)
+	l := r.Len()
+	if cap(pr.scratchRelocEnt) >= l {
+		r.Relocs = pr.scratchRelocEnt[:l]
+		pr.scratchRelocEnt = nil
+	} else {
+		r.Relocs = make([]RelocEnt, l)
+	}
+	for i := range r.Relocs {
+		r.Sync(SyncReloc)
+		r.Relocs[i] = RelocEnt{RelocKind(r.Len()), Index(r.Len())}
+	}
+
+	return r
+}
+
+// A Decoder provides methods for decoding an individual element's
+// bitstream data.
+type Decoder struct {
+	common *PkgDecoder
+
+	Relocs []RelocEnt
+	Data   strings.Reader
+
+	k   RelocKind
+	Idx Index
+}
+
+func (r *Decoder) checkErr(err error) {
+	if err != nil {
+		errorf("unexpected decoding error: %w", err)
+	}
+}
+
+func (r *Decoder) rawUvarint() uint64 {
+	x, err := readUvarint(&r.Data)
+	r.checkErr(err)
+	return x
+}
+
+// readUvarint is a type-specialized copy of encoding/binary.ReadUvarint.
+// This avoids the interface conversion and thus has better escape properties,
+// which flows up the stack.
+func readUvarint(r *strings.Reader) (uint64, error) {
+	var x uint64
+	var s uint
+	for i := 0; i < binary.MaxVarintLen64; i++ {
+		b, err := r.ReadByte()
+		if err != nil {
+			if i > 0 && err == io.EOF {
+				err = io.ErrUnexpectedEOF
+			}
+			return x, err
+		}
+		if b < 0x80 {
+			if i == binary.MaxVarintLen64-1 && b > 1 {
+				return x, overflow
+			}
+			return x | uint64(b)<<s, nil
+		}
+		x |= uint64(b&0x7f) << s
+		s += 7
+	}
+	return x, overflow
+}
+
+var overflow = errors.New("pkgbits: readUvarint overflows a 64-bit integer")
+
+func (r *Decoder) rawVarint() int64 {
+	ux := r.rawUvarint()
+
+	// Zig-zag decode.
+	x := int64(ux >> 1)
+	if ux&1 != 0 {
+		x = ^x
+	}
+	return x
+}
+
+func (r *Decoder) rawReloc(k RelocKind, idx int) Index {
+	e := r.Relocs[idx]
+	assert(e.Kind == k)
+	return e.Idx
+}
+
+// Sync decodes a sync marker from the element bitstream and asserts
+// that it matches the expected marker.
+//
+// If r.common.sync is false, then Sync is a no-op.
+func (r *Decoder) Sync(mWant SyncMarker) {
+	if !r.common.sync {
+		return
+	}
+
+	pos, _ := r.Data.Seek(0, io.SeekCurrent)
+	mHave := SyncMarker(r.rawUvarint())
+	writerPCs := make([]int, r.rawUvarint())
+	for i := range writerPCs {
+		writerPCs[i] = int(r.rawUvarint())
+	}
+
+	if mHave == mWant {
+		return
+	}
+
+	// There's some tension here between printing:
+	//
+	// (1) full file paths that tools can recognize (e.g., so emacs
+	// hyperlinks the "file:line" text for easy navigation), or
+	//
+	// (2) short file paths that are easier for humans to read (e.g., by
+	// omitting redundant or irrelevant details, so it's easier to
+	// focus on the useful bits that remain).
+	//
+	// The current formatting favors the former, as it seems more
+	// helpful in practice. But perhaps the formatting could be improved
+	// to better address both concerns. For example, use relative file
+	// paths if they would be shorter, or rewrite file paths to contain
+	// "$GOROOT" (like objabi.AbsFile does) if tools can be taught how
+	// to reliably expand that again.
+ + fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.Idx, pos) + + fmt.Printf("\nfound %v, written at:\n", mHave) + if len(writerPCs) == 0 { + fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath) + } + for _, pc := range writerPCs { + fmt.Printf("\t%s\n", r.common.StringIdx(r.rawReloc(RelocString, pc))) + } + + fmt.Printf("\nexpected %v, reading at:\n", mWant) + var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size? + n := runtime.Callers(2, readerPCs[:]) + for _, pc := range fmtFrames(readerPCs[:n]...) { + fmt.Printf("\t%s\n", pc) + } + + // We already printed a stack trace for the reader, so now we can + // simply exit. Printing a second one with panic or base.Fatalf + // would just be noise. + os.Exit(1) +} + +// Bool decodes and returns a bool value from the element bitstream. +func (r *Decoder) Bool() bool { + r.Sync(SyncBool) + x, err := r.Data.ReadByte() + r.checkErr(err) + assert(x < 2) + return x != 0 +} + +// Int64 decodes and returns an int64 value from the element bitstream. +func (r *Decoder) Int64() int64 { + r.Sync(SyncInt64) + return r.rawVarint() +} + +// Int64 decodes and returns a uint64 value from the element bitstream. +func (r *Decoder) Uint64() uint64 { + r.Sync(SyncUint64) + return r.rawUvarint() +} + +// Len decodes and returns a non-negative int value from the element bitstream. +func (r *Decoder) Len() int { x := r.Uint64(); v := int(x); assert(uint64(v) == x); return v } + +// Int decodes and returns an int value from the element bitstream. +func (r *Decoder) Int() int { x := r.Int64(); v := int(x); assert(int64(v) == x); return v } + +// Uint decodes and returns a uint value from the element bitstream. +func (r *Decoder) Uint() uint { x := r.Uint64(); v := uint(x); assert(uint64(v) == x); return v } + +// Code decodes a Code value from the element bitstream and returns +// its ordinal value. It's the caller's responsibility to convert the +// result to an appropriate Code type. +// +// TODO(mdempsky): Ideally this method would have signature "Code[T +// Code] T" instead, but we don't allow generic methods and the +// compiler can't depend on generics yet anyway. +func (r *Decoder) Code(mark SyncMarker) int { + r.Sync(mark) + return r.Len() +} + +// Reloc decodes a relocation of expected section k from the element +// bitstream and returns an index to the referenced element. +func (r *Decoder) Reloc(k RelocKind) Index { + r.Sync(SyncUseReloc) + return r.rawReloc(k, r.Len()) +} + +// String decodes and returns a string value from the element +// bitstream. +func (r *Decoder) String() string { + r.Sync(SyncString) + return r.common.StringIdx(r.Reloc(RelocString)) +} + +// Strings decodes and returns a variable-length slice of strings from +// the element bitstream. +func (r *Decoder) Strings() []string { + res := make([]string, r.Len()) + for i := range res { + res[i] = r.String() + } + return res +} + +// Value decodes and returns a constant.Value from the element +// bitstream. 
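+//
+// Editor's sketch (an addition, not upstream text): a complex constant such
+// as 3+4i is encoded as a Bool "is complex" flag followed by two scalars, so
+// the decoder below reads the real part first and then rebuilds the value
+// with constant.BinaryOp(real, token.ADD, constant.MakeImag(imag)).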
+func (r *Decoder) Value() constant.Value { + r.Sync(SyncValue) + isComplex := r.Bool() + val := r.scalar() + if isComplex { + val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar())) + } + return val +} + +func (r *Decoder) scalar() constant.Value { + switch tag := CodeVal(r.Code(SyncVal)); tag { + default: + panic(fmt.Errorf("unexpected scalar tag: %v", tag)) + + case ValBool: + return constant.MakeBool(r.Bool()) + case ValString: + return constant.MakeString(r.String()) + case ValInt64: + return constant.MakeInt64(r.Int64()) + case ValBigInt: + return constant.Make(r.bigInt()) + case ValBigRat: + num := r.bigInt() + denom := r.bigInt() + return constant.Make(new(big.Rat).SetFrac(num, denom)) + case ValBigFloat: + return constant.Make(r.bigFloat()) + } +} + +func (r *Decoder) bigInt() *big.Int { + v := new(big.Int).SetBytes([]byte(r.String())) + if r.Bool() { + v.Neg(v) + } + return v +} + +func (r *Decoder) bigFloat() *big.Float { + v := new(big.Float).SetPrec(512) + assert(v.UnmarshalText([]byte(r.String())) == nil) + return v +} + +// @@@ Helpers + +// TODO(mdempsky): These should probably be removed. I think they're a +// smell that the export data format is not yet quite right. + +// PeekPkgPath returns the package path for the specified package +// index. +func (pr *PkgDecoder) PeekPkgPath(idx Index) string { + var path string + { + r := pr.TempDecoder(RelocPkg, idx, SyncPkgDef) + path = r.String() + pr.RetireDecoder(&r) + } + if path == "" { + path = pr.pkgPath + } + return path +} + +// PeekObj returns the package path, object name, and CodeObj for the +// specified object index. +func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) { + var ridx Index + var name string + var rcode int + { + r := pr.TempDecoder(RelocName, idx, SyncObject1) + r.Sync(SyncSym) + r.Sync(SyncPkg) + ridx = r.Reloc(RelocPkg) + name = r.String() + rcode = r.Code(SyncCodeObj) + pr.RetireDecoder(&r) + } + + path := pr.PeekPkgPath(ridx) + assert(name != "") + + tag := CodeObj(rcode) + + return path, name, tag +} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/doc.go b/vendor/golang.org/x/tools/internal/pkgbits/doc.go new file mode 100644 index 0000000000000..c8a2796b5e4cb --- /dev/null +++ b/vendor/golang.org/x/tools/internal/pkgbits/doc.go @@ -0,0 +1,32 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pkgbits implements low-level coding abstractions for +// Unified IR's export data format. +// +// At a low-level, a package is a collection of bitstream elements. +// Each element has a "kind" and a dense, non-negative index. +// Elements can be randomly accessed given their kind and index. +// +// Individual elements are sequences of variable-length values (e.g., +// integers, booleans, strings, go/constant values, cross-references +// to other elements). Package pkgbits provides APIs for encoding and +// decoding these low-level values, but the details of mapping +// higher-level Go constructs into elements is left to higher-level +// abstractions. +// +// Elements may cross-reference each other with "relocations." For +// example, an element representing a pointer type has a relocation +// referring to the element type. +// +// Go constructs may be composed as a constellation of multiple +// elements. 
For example, a declared function may have one element to +// describe the object (e.g., its name, type, position), and a +// separate element to describe its function body. This allows readers +// some flexibility in efficiently seeking or re-reading data (e.g., +// inlining requires re-reading the function body for each inlined +// call, without needing to re-read the object-level details). +// +// This is a copy of internal/pkgbits in the Go implementation. +package pkgbits diff --git a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go new file mode 100644 index 0000000000000..e98e41171e1b6 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go @@ -0,0 +1,383 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "bytes" + "crypto/md5" + "encoding/binary" + "go/constant" + "io" + "math/big" + "runtime" +) + +// currentVersion is the current version number. +// +// - v0: initial prototype +// +// - v1: adds the flags uint32 word +const currentVersion uint32 = 1 + +// A PkgEncoder provides methods for encoding a package's Unified IR +// export data. +type PkgEncoder struct { + // elems holds the bitstream for previously encoded elements. + elems [numRelocs][]string + + // stringsIdx maps previously encoded strings to their index within + // the RelocString section, to allow deduplication. That is, + // elems[RelocString][stringsIdx[s]] == s (if present). + stringsIdx map[string]Index + + // syncFrames is the number of frames to write at each sync + // marker. A negative value means sync markers are omitted. + syncFrames int +} + +// SyncMarkers reports whether pw uses sync markers. +func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 } + +// NewPkgEncoder returns an initialized PkgEncoder. +// +// syncFrames is the number of caller frames that should be serialized +// at Sync points. Serializing additional frames results in larger +// export data files, but can help diagnosing desync errors in +// higher-level Unified IR reader/writer code. If syncFrames is +// negative, then sync markers are omitted entirely. +func NewPkgEncoder(syncFrames int) PkgEncoder { + return PkgEncoder{ + stringsIdx: make(map[string]Index), + syncFrames: syncFrames, + } +} + +// DumpTo writes the package's encoded data to out0 and returns the +// package fingerprint. +func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) { + h := md5.New() + out := io.MultiWriter(out0, h) + + writeUint32 := func(x uint32) { + assert(binary.Write(out, binary.LittleEndian, x) == nil) + } + + writeUint32(currentVersion) + + var flags uint32 + if pw.SyncMarkers() { + flags |= flagSyncMarkers + } + writeUint32(flags) + + // Write elemEndsEnds. + var sum uint32 + for _, elems := range &pw.elems { + sum += uint32(len(elems)) + writeUint32(sum) + } + + // Write elemEnds. + sum = 0 + for _, elems := range &pw.elems { + for _, elem := range elems { + sum += uint32(len(elem)) + writeUint32(sum) + } + } + + // Write elemData. + for _, elems := range &pw.elems { + for _, elem := range elems { + _, err := io.WriteString(out, elem) + assert(err == nil) + } + } + + // Write fingerprint. 
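+	// Editor's note (an addition, not upstream text): because out multi-writes
+	// into the md5 hash h, the fingerprint below is the first 8 bytes of the
+	// MD5 of the entire payload written above; it is appended to out0 and is
+	// what PkgDecoder.Fingerprint later reads back from the last 8 bytes of
+	// elemData.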
+ copy(fingerprint[:], h.Sum(nil)) + _, err := out0.Write(fingerprint[:]) + assert(err == nil) + + return +} + +// StringIdx adds a string value to the strings section, if not +// already present, and returns its index. +func (pw *PkgEncoder) StringIdx(s string) Index { + if idx, ok := pw.stringsIdx[s]; ok { + assert(pw.elems[RelocString][idx] == s) + return idx + } + + idx := Index(len(pw.elems[RelocString])) + pw.elems[RelocString] = append(pw.elems[RelocString], s) + pw.stringsIdx[s] = idx + return idx +} + +// NewEncoder returns an Encoder for a new element within the given +// section, and encodes the given SyncMarker as the start of the +// element bitstream. +func (pw *PkgEncoder) NewEncoder(k RelocKind, marker SyncMarker) Encoder { + e := pw.NewEncoderRaw(k) + e.Sync(marker) + return e +} + +// NewEncoderRaw returns an Encoder for a new element within the given +// section. +// +// Most callers should use NewEncoder instead. +func (pw *PkgEncoder) NewEncoderRaw(k RelocKind) Encoder { + idx := Index(len(pw.elems[k])) + pw.elems[k] = append(pw.elems[k], "") // placeholder + + return Encoder{ + p: pw, + k: k, + Idx: idx, + } +} + +// An Encoder provides methods for encoding an individual element's +// bitstream data. +type Encoder struct { + p *PkgEncoder + + Relocs []RelocEnt + RelocMap map[RelocEnt]uint32 + Data bytes.Buffer // accumulated element bitstream data + + encodingRelocHeader bool + + k RelocKind + Idx Index // index within relocation section +} + +// Flush finalizes the element's bitstream and returns its Index. +func (w *Encoder) Flush() Index { + var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved + + // Backup the data so we write the relocations at the front. + var tmp bytes.Buffer + io.Copy(&tmp, &w.Data) + + // TODO(mdempsky): Consider writing these out separately so they're + // easier to strip, along with function bodies, so that we can prune + // down to just the data that's relevant to go/types. + if w.encodingRelocHeader { + panic("encodingRelocHeader already true; recursive flush?") + } + w.encodingRelocHeader = true + w.Sync(SyncRelocs) + w.Len(len(w.Relocs)) + for _, rEnt := range w.Relocs { + w.Sync(SyncReloc) + w.Len(int(rEnt.Kind)) + w.Len(int(rEnt.Idx)) + } + + io.Copy(&sb, &w.Data) + io.Copy(&sb, &tmp) + w.p.elems[w.k][w.Idx] = sb.String() + + return w.Idx +} + +func (w *Encoder) checkErr(err error) { + if err != nil { + errorf("unexpected encoding error: %v", err) + } +} + +func (w *Encoder) rawUvarint(x uint64) { + var buf [binary.MaxVarintLen64]byte + n := binary.PutUvarint(buf[:], x) + _, err := w.Data.Write(buf[:n]) + w.checkErr(err) +} + +func (w *Encoder) rawVarint(x int64) { + // Zig-zag encode. + ux := uint64(x) << 1 + if x < 0 { + ux = ^ux + } + + w.rawUvarint(ux) +} + +func (w *Encoder) rawReloc(r RelocKind, idx Index) int { + e := RelocEnt{r, idx} + if w.RelocMap != nil { + if i, ok := w.RelocMap[e]; ok { + return int(i) + } + } else { + w.RelocMap = make(map[RelocEnt]uint32) + } + + i := len(w.Relocs) + w.RelocMap[e] = uint32(i) + w.Relocs = append(w.Relocs, e) + return i +} + +func (w *Encoder) Sync(m SyncMarker) { + if !w.p.SyncMarkers() { + return + } + + // Writing out stack frame string references requires working + // relocations, but writing out the relocations themselves involves + // sync markers. To prevent infinite recursion, we simply trim the + // stack frame for sync markers within the relocation header. 
+ var frames []string + if !w.encodingRelocHeader && w.p.syncFrames > 0 { + pcs := make([]uintptr, w.p.syncFrames) + n := runtime.Callers(2, pcs) + frames = fmtFrames(pcs[:n]...) + } + + // TODO(mdempsky): Save space by writing out stack frames as a + // linked list so we can share common stack frames. + w.rawUvarint(uint64(m)) + w.rawUvarint(uint64(len(frames))) + for _, frame := range frames { + w.rawUvarint(uint64(w.rawReloc(RelocString, w.p.StringIdx(frame)))) + } +} + +// Bool encodes and writes a bool value into the element bitstream, +// and then returns the bool value. +// +// For simple, 2-alternative encodings, the idiomatic way to call Bool +// is something like: +// +// if w.Bool(x != 0) { +// // alternative #1 +// } else { +// // alternative #2 +// } +// +// For multi-alternative encodings, use Code instead. +func (w *Encoder) Bool(b bool) bool { + w.Sync(SyncBool) + var x byte + if b { + x = 1 + } + err := w.Data.WriteByte(x) + w.checkErr(err) + return b +} + +// Int64 encodes and writes an int64 value into the element bitstream. +func (w *Encoder) Int64(x int64) { + w.Sync(SyncInt64) + w.rawVarint(x) +} + +// Uint64 encodes and writes a uint64 value into the element bitstream. +func (w *Encoder) Uint64(x uint64) { + w.Sync(SyncUint64) + w.rawUvarint(x) +} + +// Len encodes and writes a non-negative int value into the element bitstream. +func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) } + +// Int encodes and writes an int value into the element bitstream. +func (w *Encoder) Int(x int) { w.Int64(int64(x)) } + +// Len encodes and writes a uint value into the element bitstream. +func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) } + +// Reloc encodes and writes a relocation for the given (section, +// index) pair into the element bitstream. +// +// Note: Only the index is formally written into the element +// bitstream, so bitstream decoders must know from context which +// section an encoded relocation refers to. +func (w *Encoder) Reloc(r RelocKind, idx Index) { + w.Sync(SyncUseReloc) + w.Len(w.rawReloc(r, idx)) +} + +// Code encodes and writes a Code value into the element bitstream. +func (w *Encoder) Code(c Code) { + w.Sync(c.Marker()) + w.Len(c.Value()) +} + +// String encodes and writes a string value into the element +// bitstream. +// +// Internally, strings are deduplicated by adding them to the strings +// section (if not already present), and then writing a relocation +// into the element bitstream. +func (w *Encoder) String(s string) { + w.Sync(SyncString) + w.Reloc(RelocString, w.p.StringIdx(s)) +} + +// Strings encodes and writes a variable-length slice of strings into +// the element bitstream. +func (w *Encoder) Strings(ss []string) { + w.Len(len(ss)) + for _, s := range ss { + w.String(s) + } +} + +// Value encodes and writes a constant.Value into the element +// bitstream. 
+func (w *Encoder) Value(val constant.Value) { + w.Sync(SyncValue) + if w.Bool(val.Kind() == constant.Complex) { + w.scalar(constant.Real(val)) + w.scalar(constant.Imag(val)) + } else { + w.scalar(val) + } +} + +func (w *Encoder) scalar(val constant.Value) { + switch v := constant.Val(val).(type) { + default: + errorf("unhandled %v (%v)", val, val.Kind()) + case bool: + w.Code(ValBool) + w.Bool(v) + case string: + w.Code(ValString) + w.String(v) + case int64: + w.Code(ValInt64) + w.Int64(v) + case *big.Int: + w.Code(ValBigInt) + w.bigInt(v) + case *big.Rat: + w.Code(ValBigRat) + w.bigInt(v.Num()) + w.bigInt(v.Denom()) + case *big.Float: + w.Code(ValBigFloat) + w.bigFloat(v) + } +} + +func (w *Encoder) bigInt(v *big.Int) { + b := v.Bytes() + w.String(string(b)) // TODO: More efficient encoding. + w.Bool(v.Sign() < 0) +} + +func (w *Encoder) bigFloat(v *big.Float) { + b := v.Append(nil, 'p', -1) + w.String(string(b)) // TODO: More efficient encoding. +} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/flags.go b/vendor/golang.org/x/tools/internal/pkgbits/flags.go new file mode 100644 index 0000000000000..654222745facd --- /dev/null +++ b/vendor/golang.org/x/tools/internal/pkgbits/flags.go @@ -0,0 +1,9 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +const ( + flagSyncMarkers = 1 << iota // file format contains sync markers +) diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go new file mode 100644 index 0000000000000..5294f6a63edd7 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go @@ -0,0 +1,21 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.7 +// +build !go1.7 + +// TODO(mdempsky): Remove after #44505 is resolved + +package pkgbits + +import "runtime" + +func walkFrames(pcs []uintptr, visit frameVisitor) { + for _, pc := range pcs { + fn := runtime.FuncForPC(pc) + file, line := fn.FileLine(pc) + + visit(file, line, fn.Name(), pc-fn.Entry()) + } +} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go new file mode 100644 index 0000000000000..2324ae7adfe20 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go @@ -0,0 +1,28 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.7 +// +build go1.7 + +package pkgbits + +import "runtime" + +// walkFrames calls visit for each call frame represented by pcs. +// +// pcs should be a slice of PCs, as returned by runtime.Callers. +func walkFrames(pcs []uintptr, visit frameVisitor) { + if len(pcs) == 0 { + return + } + + frames := runtime.CallersFrames(pcs) + for { + frame, more := frames.Next() + visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry) + if !more { + return + } + } +} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/reloc.go b/vendor/golang.org/x/tools/internal/pkgbits/reloc.go new file mode 100644 index 0000000000000..fcdfb97ca9926 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/pkgbits/reloc.go @@ -0,0 +1,42 @@ +// Copyright 2021 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +// A RelocKind indicates a particular section within a unified IR export. +type RelocKind int32 + +// An Index represents a bitstream element index within a particular +// section. +type Index int32 + +// A relocEnt (relocation entry) is an entry in an element's local +// reference table. +// +// TODO(mdempsky): Rename this too. +type RelocEnt struct { + Kind RelocKind + Idx Index +} + +// Reserved indices within the meta relocation section. +const ( + PublicRootIdx Index = 0 + PrivateRootIdx Index = 1 +) + +const ( + RelocString RelocKind = iota + RelocMeta + RelocPosBase + RelocPkg + RelocName + RelocType + RelocObj + RelocObjExt + RelocObjDict + RelocBody + + numRelocs = iota +) diff --git a/vendor/golang.org/x/tools/internal/pkgbits/support.go b/vendor/golang.org/x/tools/internal/pkgbits/support.go new file mode 100644 index 0000000000000..ad26d3b28cae1 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/pkgbits/support.go @@ -0,0 +1,17 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import "fmt" + +func assert(b bool) { + if !b { + panic("assertion failed") + } +} + +func errorf(format string, args ...interface{}) { + panic(fmt.Errorf(format, args...)) +} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/sync.go b/vendor/golang.org/x/tools/internal/pkgbits/sync.go new file mode 100644 index 0000000000000..5bd51ef717007 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/pkgbits/sync.go @@ -0,0 +1,113 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +import ( + "fmt" + "strings" +) + +// fmtFrames formats a backtrace for reporting reader/writer desyncs. +func fmtFrames(pcs ...uintptr) []string { + res := make([]string, 0, len(pcs)) + walkFrames(pcs, func(file string, line int, name string, offset uintptr) { + // Trim package from function name. It's just redundant noise. + name = strings.TrimPrefix(name, "cmd/compile/internal/noder.") + + res = append(res, fmt.Sprintf("%s:%v: %s +0x%v", file, line, name, offset)) + }) + return res +} + +type frameVisitor func(file string, line int, name string, offset uintptr) + +// SyncMarker is an enum type that represents markers that may be +// written to export data to ensure the reader and writer stay +// synchronized. +type SyncMarker int + +//go:generate stringer -type=SyncMarker -trimprefix=Sync + +const ( + _ SyncMarker = iota + + // Public markers (known to go/types importers). + + // Low-level coding markers. + SyncEOF + SyncBool + SyncInt64 + SyncUint64 + SyncString + SyncValue + SyncVal + SyncRelocs + SyncReloc + SyncUseReloc + + // Higher-level object and type markers. + SyncPublic + SyncPos + SyncPosBase + SyncObject + SyncObject1 + SyncPkg + SyncPkgDef + SyncMethod + SyncType + SyncTypeIdx + SyncTypeParamNames + SyncSignature + SyncParams + SyncParam + SyncCodeObj + SyncSym + SyncLocalIdent + SyncSelector + + // Private markers (only known to cmd/compile). 
+ SyncPrivate + + SyncFuncExt + SyncVarExt + SyncTypeExt + SyncPragma + + SyncExprList + SyncExprs + SyncExpr + SyncExprType + SyncAssign + SyncOp + SyncFuncLit + SyncCompLit + + SyncDecl + SyncFuncBody + SyncOpenScope + SyncCloseScope + SyncCloseAnotherScope + SyncDeclNames + SyncDeclName + + SyncStmts + SyncBlockStmt + SyncIfStmt + SyncForStmt + SyncSwitchStmt + SyncRangeStmt + SyncCaseClause + SyncCommClause + SyncSelectStmt + SyncDecls + SyncLabeledStmt + SyncUseObjLocal + SyncAddLocal + SyncLinkname + SyncStmt1 + SyncStmtsEnd + SyncLabel + SyncOptLabel +) diff --git a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go new file mode 100644 index 0000000000000..4a5b0ca5f2ffc --- /dev/null +++ b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go @@ -0,0 +1,89 @@ +// Code generated by "stringer -type=SyncMarker -trimprefix=Sync"; DO NOT EDIT. + +package pkgbits + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[SyncEOF-1] + _ = x[SyncBool-2] + _ = x[SyncInt64-3] + _ = x[SyncUint64-4] + _ = x[SyncString-5] + _ = x[SyncValue-6] + _ = x[SyncVal-7] + _ = x[SyncRelocs-8] + _ = x[SyncReloc-9] + _ = x[SyncUseReloc-10] + _ = x[SyncPublic-11] + _ = x[SyncPos-12] + _ = x[SyncPosBase-13] + _ = x[SyncObject-14] + _ = x[SyncObject1-15] + _ = x[SyncPkg-16] + _ = x[SyncPkgDef-17] + _ = x[SyncMethod-18] + _ = x[SyncType-19] + _ = x[SyncTypeIdx-20] + _ = x[SyncTypeParamNames-21] + _ = x[SyncSignature-22] + _ = x[SyncParams-23] + _ = x[SyncParam-24] + _ = x[SyncCodeObj-25] + _ = x[SyncSym-26] + _ = x[SyncLocalIdent-27] + _ = x[SyncSelector-28] + _ = x[SyncPrivate-29] + _ = x[SyncFuncExt-30] + _ = x[SyncVarExt-31] + _ = x[SyncTypeExt-32] + _ = x[SyncPragma-33] + _ = x[SyncExprList-34] + _ = x[SyncExprs-35] + _ = x[SyncExpr-36] + _ = x[SyncExprType-37] + _ = x[SyncAssign-38] + _ = x[SyncOp-39] + _ = x[SyncFuncLit-40] + _ = x[SyncCompLit-41] + _ = x[SyncDecl-42] + _ = x[SyncFuncBody-43] + _ = x[SyncOpenScope-44] + _ = x[SyncCloseScope-45] + _ = x[SyncCloseAnotherScope-46] + _ = x[SyncDeclNames-47] + _ = x[SyncDeclName-48] + _ = x[SyncStmts-49] + _ = x[SyncBlockStmt-50] + _ = x[SyncIfStmt-51] + _ = x[SyncForStmt-52] + _ = x[SyncSwitchStmt-53] + _ = x[SyncRangeStmt-54] + _ = x[SyncCaseClause-55] + _ = x[SyncCommClause-56] + _ = x[SyncSelectStmt-57] + _ = x[SyncDecls-58] + _ = x[SyncLabeledStmt-59] + _ = x[SyncUseObjLocal-60] + _ = x[SyncAddLocal-61] + _ = x[SyncLinkname-62] + _ = x[SyncStmt1-63] + _ = x[SyncStmtsEnd-64] + _ = x[SyncLabel-65] + _ = x[SyncOptLabel-66] +} + +const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel" + +var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 
416, 424, 432, 437, 445, 450, 458} + +func (i SyncMarker) String() string { + i -= 1 + if i < 0 || i >= SyncMarker(len(_SyncMarker_index)-1) { + return "SyncMarker(" + strconv.FormatInt(int64(i+1), 10) + ")" + } + return _SyncMarker_name[_SyncMarker_index[i]:_SyncMarker_index[i+1]] +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go new file mode 100644 index 0000000000000..25a1426d30ec2 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -0,0 +1,179 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeparams contains common utilities for writing tools that interact +// with generic Go code, as introduced with Go 1.18. +// +// Many of the types and functions in this package are proxies for the new APIs +// introduced in the standard library with Go 1.18. For example, the +// typeparams.Union type is an alias for go/types.Union, and the ForTypeSpec +// function returns the value of the go/ast.TypeSpec.TypeParams field. At Go +// versions older than 1.18 these helpers are implemented as stubs, allowing +// users of this package to write code that handles generic constructs inline, +// even if the Go version being used to compile does not support generics. +// +// Additionally, this package contains common utilities for working with the +// new generic constructs, to supplement the standard library APIs. Notably, +// the StructuralTerms API computes a minimal representation of the structural +// restrictions on a type parameter. +// +// An external version of these APIs is available in the +// golang.org/x/exp/typeparams module. +package typeparams + +import ( + "go/ast" + "go/token" + "go/types" +) + +// UnpackIndexExpr extracts data from AST nodes that represent index +// expressions. +// +// For an ast.IndexExpr, the resulting indices slice will contain exactly one +// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable +// number of index expressions. +// +// For nodes that don't represent index expressions, the first return value of +// UnpackIndexExpr will be nil. +func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) { + switch e := n.(type) { + case *ast.IndexExpr: + return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack + case *IndexListExpr: + return e.X, e.Lbrack, e.Indices, e.Rbrack + } + return nil, token.NoPos, nil, token.NoPos +} + +// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on +// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0 +// will panic. +func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr { + switch len(indices) { + case 0: + panic("empty indices") + case 1: + return &ast.IndexExpr{ + X: x, + Lbrack: lbrack, + Index: indices[0], + Rbrack: rbrack, + } + default: + return &IndexListExpr{ + X: x, + Lbrack: lbrack, + Indices: indices, + Rbrack: rbrack, + } + } +} + +// IsTypeParam reports whether t is a type parameter. +func IsTypeParam(t types.Type) bool { + _, ok := t.(*TypeParam) + return ok +} + +// OriginMethod returns the origin method associated with the method fn. +// For methods on a non-generic receiver base type, this is just +// fn. 
However, for methods with a generic receiver, OriginMethod returns the +// corresponding method in the method set of the origin type. +// +// As a special case, if fn is not a method (has no receiver), OriginMethod +// returns fn. +func OriginMethod(fn *types.Func) *types.Func { + recv := fn.Type().(*types.Signature).Recv() + if recv == nil { + + return fn + } + base := recv.Type() + p, isPtr := base.(*types.Pointer) + if isPtr { + base = p.Elem() + } + named, isNamed := base.(*types.Named) + if !isNamed { + // Receiver is a *types.Interface. + return fn + } + if ForNamed(named).Len() == 0 { + // Receiver base has no type parameters, so we can avoid the lookup below. + return fn + } + orig := NamedTypeOrigin(named) + gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name()) + return gfn.(*types.Func) +} + +// GenericAssignableTo is a generalization of types.AssignableTo that +// implements the following rule for uninstantiated generic types: +// +// If V and T are generic named types, then V is considered assignable to T if, +// for every possible instantation of V[A_1, ..., A_N], the instantiation +// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N]. +// +// If T has structural constraints, they must be satisfied by V. +// +// For example, consider the following type declarations: +// +// type Interface[T any] interface { +// Accept(T) +// } +// +// type Container[T any] struct { +// Element T +// } +// +// func (c Container[T]) Accept(t T) { c.Element = t } +// +// In this case, GenericAssignableTo reports that instantiations of Container +// are assignable to the corresponding instantiation of Interface. +func GenericAssignableTo(ctxt *Context, V, T types.Type) bool { + // If V and T are not both named, or do not have matching non-empty type + // parameter lists, fall back on types.AssignableTo. + + VN, Vnamed := V.(*types.Named) + TN, Tnamed := T.(*types.Named) + if !Vnamed || !Tnamed { + return types.AssignableTo(V, T) + } + + vtparams := ForNamed(VN) + ttparams := ForNamed(TN) + if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || NamedTypeArgs(VN).Len() != 0 || NamedTypeArgs(TN).Len() != 0 { + return types.AssignableTo(V, T) + } + + // V and T have the same (non-zero) number of type params. Instantiate both + // with the type parameters of V. This must always succeed for V, and will + // succeed for T if and only if the type set of each type parameter of V is a + // subset of the type set of the corresponding type parameter of T, meaning + // that every instantiation of V corresponds to a valid instantiation of T. + + // Minor optimization: ensure we share a context across the two + // instantiations below. + if ctxt == nil { + ctxt = NewContext() + } + + var targs []types.Type + for i := 0; i < vtparams.Len(); i++ { + targs = append(targs, vtparams.At(i)) + } + + vinst, err := Instantiate(ctxt, V, targs, true) + if err != nil { + panic("type parameters should satisfy their own constraints") + } + + tinst, err := Instantiate(ctxt, T, targs, true) + if err != nil { + return false + } + + return types.AssignableTo(vinst, tinst) +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go new file mode 100644 index 0000000000000..993135ec90e89 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go @@ -0,0 +1,122 @@ +// Copyright 2022 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "go/types" +) + +// CoreType returns the core type of T or nil if T does not have a core type. +// +// See https://go.dev/ref/spec#Core_types for the definition of a core type. +func CoreType(T types.Type) types.Type { + U := T.Underlying() + if _, ok := U.(*types.Interface); !ok { + return U // for non-interface types, + } + + terms, err := _NormalTerms(U) + if len(terms) == 0 || err != nil { + // len(terms) -> empty type set of interface. + // err != nil => U is invalid, exceeds complexity bounds, or has an empty type set. + return nil // no core type. + } + + U = terms[0].Type().Underlying() + var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying()) + for identical = 1; identical < len(terms); identical++ { + if !types.Identical(U, terms[identical].Type().Underlying()) { + break + } + } + + if identical == len(terms) { + // https://go.dev/ref/spec#Core_types + // "There is a single type U which is the underlying type of all types in the type set of T" + return U + } + ch, ok := U.(*types.Chan) + if !ok { + return nil // no core type as identical < len(terms) and U is not a channel. + } + // https://go.dev/ref/spec#Core_types + // "the type chan E if T contains only bidirectional channels, or the type chan<- E or + // <-chan E depending on the direction of the directional channels present." + for chans := identical; chans < len(terms); chans++ { + curr, ok := terms[chans].Type().Underlying().(*types.Chan) + if !ok { + return nil + } + if !types.Identical(ch.Elem(), curr.Elem()) { + return nil // channel elements are not identical. + } + if ch.Dir() == types.SendRecv { + // ch is bidirectional. We can safely always use curr's direction. + ch = curr + } else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() { + // ch and curr are not bidirectional and not the same direction. + return nil + } + } + return ch +} + +// _NormalTerms returns a slice of terms representing the normalized structural +// type restrictions of a type, if any. +// +// For all types other than *types.TypeParam, *types.Interface, and +// *types.Union, this is just a single term with Tilde() == false and +// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see +// below. +// +// Structural type restrictions of a type parameter are created via +// non-interface types embedded in its constraint interface (directly, or via a +// chain of interface embeddings). For example, in the declaration type +// T[P interface{~int; m()}] int the structural restriction of the type +// parameter P is ~int. +// +// With interface embedding and unions, the specification of structural type +// restrictions may be arbitrarily complex. For example, consider the +// following: +// +// type A interface{ ~string|~[]byte } +// +// type B interface{ int|string } +// +// type C interface { ~string|~int } +// +// type T[P interface{ A|B; C }] int +// +// In this example, the structural type restriction of P is ~string|int: A|B +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, +// which when intersected with C (~string|~int) yields ~string|int. +// +// _NormalTerms computes these expansions and reductions, producing a +// "normalized" form of the embeddings. 
A structural restriction is normalized +// if it is a single union containing no interface terms, and is minimal in the +// sense that removing any term changes the set of types satisfying the +// constraint. It is left as a proof for the reader that, modulo sorting, there +// is exactly one such normalized form. +// +// Because the minimal representation always takes this form, _NormalTerms +// returns a slice of tilde terms corresponding to the terms of the union in +// the normalized structural restriction. An error is returned if the type is +// invalid, exceeds complexity bounds, or has an empty type set. In the latter +// case, _NormalTerms returns ErrEmptyTypeSet. +// +// _NormalTerms makes no guarantees about the order of terms, except that it +// is deterministic. +func _NormalTerms(typ types.Type) ([]*Term, error) { + switch typ := typ.(type) { + case *TypeParam: + return StructuralTerms(typ) + case *Union: + return UnionTermSet(typ) + case *types.Interface: + return InterfaceTermSet(typ) + default: + return []*Term{NewTerm(false, typ)}, nil + } +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go b/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go new file mode 100644 index 0000000000000..18212390e1927 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/enabled_go117.go @@ -0,0 +1,12 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.18 +// +build !go1.18 + +package typeparams + +// Enabled reports whether type parameters are enabled in the current build +// environment. +const Enabled = false diff --git a/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go b/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go new file mode 100644 index 0000000000000..d67148823c4d6 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/enabled_go118.go @@ -0,0 +1,15 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package typeparams + +// Note: this constant is in a separate file as this is the only acceptable +// diff between the <1.18 API of this package and the 1.18 API. + +// Enabled reports whether type parameters are enabled in the current build +// environment. +const Enabled = true diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go new file mode 100644 index 0000000000000..9c631b6512ded --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go @@ -0,0 +1,218 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "errors" + "fmt" + "go/types" + "os" + "strings" +) + +//go:generate go run copytermlist.go + +const debug = false + +var ErrEmptyTypeSet = errors.New("empty type set") + +// StructuralTerms returns a slice of terms representing the normalized +// structural type restrictions of a type parameter, if any. +// +// Structural type restrictions of a type parameter are created via +// non-interface types embedded in its constraint interface (directly, or via a +// chain of interface embeddings). 
For example, in the declaration +// +// type T[P interface{~int; m()}] int +// +// the structural restriction of the type parameter P is ~int. +// +// With interface embedding and unions, the specification of structural type +// restrictions may be arbitrarily complex. For example, consider the +// following: +// +// type A interface{ ~string|~[]byte } +// +// type B interface{ int|string } +// +// type C interface { ~string|~int } +// +// type T[P interface{ A|B; C }] int +// +// In this example, the structural type restriction of P is ~string|int: A|B +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, +// which when intersected with C (~string|~int) yields ~string|int. +// +// StructuralTerms computes these expansions and reductions, producing a +// "normalized" form of the embeddings. A structural restriction is normalized +// if it is a single union containing no interface terms, and is minimal in the +// sense that removing any term changes the set of types satisfying the +// constraint. It is left as a proof for the reader that, modulo sorting, there +// is exactly one such normalized form. +// +// Because the minimal representation always takes this form, StructuralTerms +// returns a slice of tilde terms corresponding to the terms of the union in +// the normalized structural restriction. An error is returned if the +// constraint interface is invalid, exceeds complexity bounds, or has an empty +// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet. +// +// StructuralTerms makes no guarantees about the order of terms, except that it +// is deterministic. +func StructuralTerms(tparam *TypeParam) ([]*Term, error) { + constraint := tparam.Constraint() + if constraint == nil { + return nil, fmt.Errorf("%s has nil constraint", tparam) + } + iface, _ := constraint.Underlying().(*types.Interface) + if iface == nil { + return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying()) + } + return InterfaceTermSet(iface) +} + +// InterfaceTermSet computes the normalized terms for a constraint interface, +// returning an error if the term set cannot be computed or is empty. In the +// latter case, the error will be ErrEmptyTypeSet. +// +// See the documentation of StructuralTerms for more information on +// normalization. +func InterfaceTermSet(iface *types.Interface) ([]*Term, error) { + return computeTermSet(iface) +} + +// UnionTermSet computes the normalized terms for a union, returning an error +// if the term set cannot be computed or is empty. In the latter case, the +// error will be ErrEmptyTypeSet. +// +// See the documentation of StructuralTerms for more information on +// normalization. +func UnionTermSet(union *Union) ([]*Term, error) { + return computeTermSet(union) +} + +func computeTermSet(typ types.Type) ([]*Term, error) { + tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0) + if err != nil { + return nil, err + } + if tset.terms.isEmpty() { + return nil, ErrEmptyTypeSet + } + if tset.terms.isAll() { + return nil, nil + } + var terms []*Term + for _, term := range tset.terms { + terms = append(terms, NewTerm(term.tilde, term.typ)) + } + return terms, nil +} + +// A termSet holds the normalized set of terms for a given type. +// +// The name termSet is intentionally distinct from 'type set': a type set is +// all types that implement a type (and includes method restrictions), whereas +// a term set just represents the structural restrictions on a type. 
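+//
+// Editor's illustration (an addition, not upstream text; fmt.Stringer is just
+// a convenient stand-in): for a constraint like
+// interface{ ~string | int; fmt.Stringer }, the term set is {~string, int},
+// while the type set is additionally narrowed by the String() method
+// requirement.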
+type termSet struct { + complete bool + terms termlist +} + +func indentf(depth int, format string, args ...interface{}) { + fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...) +} + +func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) { + if t == nil { + panic("nil type") + } + + if debug { + indentf(depth, "%s", t.String()) + defer func() { + if err != nil { + indentf(depth, "=> %s", err) + } else { + indentf(depth, "=> %s", res.terms.String()) + } + }() + } + + const maxTermCount = 100 + if tset, ok := seen[t]; ok { + if !tset.complete { + return nil, fmt.Errorf("cycle detected in the declaration of %s", t) + } + return tset, nil + } + + // Mark the current type as seen to avoid infinite recursion. + tset := new(termSet) + defer func() { + tset.complete = true + }() + seen[t] = tset + + switch u := t.Underlying().(type) { + case *types.Interface: + // The term set of an interface is the intersection of the term sets of its + // embedded types. + tset.terms = allTermlist + for i := 0; i < u.NumEmbeddeds(); i++ { + embedded := u.EmbeddedType(i) + if _, ok := embedded.Underlying().(*TypeParam); ok { + return nil, fmt.Errorf("invalid embedded type %T", embedded) + } + tset2, err := computeTermSetInternal(embedded, seen, depth+1) + if err != nil { + return nil, err + } + tset.terms = tset.terms.intersect(tset2.terms) + } + case *Union: + // The term set of a union is the union of term sets of its terms. + tset.terms = nil + for i := 0; i < u.Len(); i++ { + t := u.Term(i) + var terms termlist + switch t.Type().Underlying().(type) { + case *types.Interface: + tset2, err := computeTermSetInternal(t.Type(), seen, depth+1) + if err != nil { + return nil, err + } + terms = tset2.terms + case *TypeParam, *Union: + // A stand-alone type parameter or union is not permitted as union + // term. + return nil, fmt.Errorf("invalid union term %T", t) + default: + if t.Type() == types.Typ[types.Invalid] { + continue + } + terms = termlist{{t.Tilde(), t.Type()}} + } + tset.terms = tset.terms.union(terms) + if len(tset.terms) > maxTermCount { + return nil, fmt.Errorf("exceeded max term count %d", maxTermCount) + } + } + case *TypeParam: + panic("unreachable") + default: + // For all other types, the term set is just a single non-tilde term + // holding the type itself. + if u != types.Typ[types.Invalid] { + tset.terms = termlist{{false, t}} + } + } + return tset, nil +} + +// under is a facade for the go/types internal function of the same name. It is +// used by typeterm.go. +func under(t types.Type) types.Type { + return t.Underlying() +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go new file mode 100644 index 0000000000000..933106a23dd43 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/termlist.go @@ -0,0 +1,163 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by copytermlist.go DO NOT EDIT. + +package typeparams + +import ( + "bytes" + "go/types" +) + +// A termlist represents the type set represented by the union +// t1 ∪ y2 ∪ ... tn of the type sets of the terms t1 to tn. +// A termlist is in normal form if all terms are disjoint. +// termlist operations don't require the operands to be in +// normal form. +type termlist []*term + +// allTermlist represents the set of all types. 
+// It is in normal form. +var allTermlist = termlist{new(term)} + +// String prints the termlist exactly (without normalization). +func (xl termlist) String() string { + if len(xl) == 0 { + return "∅" + } + var buf bytes.Buffer + for i, x := range xl { + if i > 0 { + buf.WriteString(" ∪ ") + } + buf.WriteString(x.String()) + } + return buf.String() +} + +// isEmpty reports whether the termlist xl represents the empty set of types. +func (xl termlist) isEmpty() bool { + // If there's a non-nil term, the entire list is not empty. + // If the termlist is in normal form, this requires at most + // one iteration. + for _, x := range xl { + if x != nil { + return false + } + } + return true +} + +// isAll reports whether the termlist xl represents the set of all types. +func (xl termlist) isAll() bool { + // If there's a 𝓤 term, the entire list is 𝓤. + // If the termlist is in normal form, this requires at most + // one iteration. + for _, x := range xl { + if x != nil && x.typ == nil { + return true + } + } + return false +} + +// norm returns the normal form of xl. +func (xl termlist) norm() termlist { + // Quadratic algorithm, but good enough for now. + // TODO(gri) fix asymptotic performance + used := make([]bool, len(xl)) + var rl termlist + for i, xi := range xl { + if xi == nil || used[i] { + continue + } + for j := i + 1; j < len(xl); j++ { + xj := xl[j] + if xj == nil || used[j] { + continue + } + if u1, u2 := xi.union(xj); u2 == nil { + // If we encounter a 𝓤 term, the entire list is 𝓤. + // Exit early. + // (Note that this is not just an optimization; + // if we continue, we may end up with a 𝓤 term + // and other terms and the result would not be + // in normal form.) + if u1.typ == nil { + return allTermlist + } + xi = u1 + used[j] = true // xj is now unioned into xi - ignore it in future iterations + } + } + rl = append(rl, xi) + } + return rl +} + +// union returns the union xl ∪ yl. +func (xl termlist) union(yl termlist) termlist { + return append(xl, yl...).norm() +} + +// intersect returns the intersection xl ∩ yl. +func (xl termlist) intersect(yl termlist) termlist { + if xl.isEmpty() || yl.isEmpty() { + return nil + } + + // Quadratic algorithm, but good enough for now. + // TODO(gri) fix asymptotic performance + var rl termlist + for _, x := range xl { + for _, y := range yl { + if r := x.intersect(y); r != nil { + rl = append(rl, r) + } + } + } + return rl.norm() +} + +// equal reports whether xl and yl represent the same type set. +func (xl termlist) equal(yl termlist) bool { + // TODO(gri) this should be more efficient + return xl.subsetOf(yl) && yl.subsetOf(xl) +} + +// includes reports whether t ∈ xl. +func (xl termlist) includes(t types.Type) bool { + for _, x := range xl { + if x.includes(t) { + return true + } + } + return false +} + +// supersetOf reports whether y ⊆ xl. +func (xl termlist) supersetOf(y *term) bool { + for _, x := range xl { + if y.subsetOf(x) { + return true + } + } + return false +} + +// subsetOf reports whether xl ⊆ yl. 
+func (xl termlist) subsetOf(yl termlist) bool { + if yl.isEmpty() { + return xl.isEmpty() + } + + // each term x of xl must be a subset of yl + for _, x := range xl { + if !yl.supersetOf(x) { + return false // x is not a subset yl + } + } + return true +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go new file mode 100644 index 0000000000000..b4788978ff4b6 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go @@ -0,0 +1,197 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.18 +// +build !go1.18 + +package typeparams + +import ( + "go/ast" + "go/token" + "go/types" +) + +func unsupported() { + panic("type parameters are unsupported at this go version") +} + +// IndexListExpr is a placeholder type, as type parameters are not supported at +// this Go version. Its methods panic on use. +type IndexListExpr struct { + ast.Expr + X ast.Expr // expression + Lbrack token.Pos // position of "[" + Indices []ast.Expr // index expressions + Rbrack token.Pos // position of "]" +} + +// ForTypeSpec returns an empty field list, as type parameters on not supported +// at this Go version. +func ForTypeSpec(*ast.TypeSpec) *ast.FieldList { + return nil +} + +// ForFuncType returns an empty field list, as type parameters are not +// supported at this Go version. +func ForFuncType(*ast.FuncType) *ast.FieldList { + return nil +} + +// TypeParam is a placeholder type, as type parameters are not supported at +// this Go version. Its methods panic on use. +type TypeParam struct{ types.Type } + +func (*TypeParam) Index() int { unsupported(); return 0 } +func (*TypeParam) Constraint() types.Type { unsupported(); return nil } +func (*TypeParam) Obj() *types.TypeName { unsupported(); return nil } + +// TypeParamList is a placeholder for an empty type parameter list. +type TypeParamList struct{} + +func (*TypeParamList) Len() int { return 0 } +func (*TypeParamList) At(int) *TypeParam { unsupported(); return nil } + +// TypeList is a placeholder for an empty type list. +type TypeList struct{} + +func (*TypeList) Len() int { return 0 } +func (*TypeList) At(int) types.Type { unsupported(); return nil } + +// NewTypeParam is unsupported at this Go version, and panics. +func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam { + unsupported() + return nil +} + +// SetTypeParamConstraint is unsupported at this Go version, and panics. +func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) { + unsupported() +} + +// NewSignatureType calls types.NewSignature, panicking if recvTypeParams or +// typeParams is non-empty. +func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature { + if len(recvTypeParams) != 0 || len(typeParams) != 0 { + panic("signatures cannot have type parameters at this Go version") + } + return types.NewSignature(recv, params, results, variadic) +} + +// ForSignature returns an empty slice. +func ForSignature(*types.Signature) *TypeParamList { + return nil +} + +// RecvTypeParams returns a nil slice. +func RecvTypeParams(sig *types.Signature) *TypeParamList { + return nil +} + +// IsComparable returns false, as no interfaces are type-restricted at this Go +// version. 
+func IsComparable(*types.Interface) bool { + return false +} + +// IsMethodSet returns true, as no interfaces are type-restricted at this Go +// version. +func IsMethodSet(*types.Interface) bool { + return true +} + +// IsImplicit returns false, as no interfaces are implicit at this Go version. +func IsImplicit(*types.Interface) bool { + return false +} + +// MarkImplicit does nothing, because this Go version does not have implicit +// interfaces. +func MarkImplicit(*types.Interface) {} + +// ForNamed returns an empty type parameter list, as type parameters are not +// supported at this Go version. +func ForNamed(*types.Named) *TypeParamList { + return nil +} + +// SetForNamed panics if tparams is non-empty. +func SetForNamed(_ *types.Named, tparams []*TypeParam) { + if len(tparams) > 0 { + unsupported() + } +} + +// NamedTypeArgs returns nil. +func NamedTypeArgs(*types.Named) *TypeList { + return nil +} + +// NamedTypeOrigin is the identity method at this Go version. +func NamedTypeOrigin(named *types.Named) types.Type { + return named +} + +// Term holds information about a structural type restriction. +type Term struct { + tilde bool + typ types.Type +} + +func (m *Term) Tilde() bool { return m.tilde } +func (m *Term) Type() types.Type { return m.typ } +func (m *Term) String() string { + pre := "" + if m.tilde { + pre = "~" + } + return pre + m.typ.String() +} + +// NewTerm is unsupported at this Go version, and panics. +func NewTerm(tilde bool, typ types.Type) *Term { + return &Term{tilde, typ} +} + +// Union is a placeholder type, as type parameters are not supported at this Go +// version. Its methods panic on use. +type Union struct{ types.Type } + +func (*Union) Len() int { return 0 } +func (*Union) Term(i int) *Term { unsupported(); return nil } + +// NewUnion is unsupported at this Go version, and panics. +func NewUnion(terms []*Term) *Union { + unsupported() + return nil +} + +// InitInstanceInfo is a noop at this Go version. +func InitInstanceInfo(*types.Info) {} + +// Instance is a placeholder type, as type parameters are not supported at this +// Go version. +type Instance struct { + TypeArgs *TypeList + Type types.Type +} + +// GetInstances returns a nil map, as type parameters are not supported at this +// Go version. +func GetInstances(info *types.Info) map[*ast.Ident]Instance { return nil } + +// Context is a placeholder type, as type parameters are not supported at +// this Go version. +type Context struct{} + +// NewContext returns a placeholder Context instance. +func NewContext() *Context { + return &Context{} +} + +// Instantiate is unsupported on this Go version, and panics. +func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) { + unsupported() + return nil, nil +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go new file mode 100644 index 0000000000000..114a36b866bc0 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go @@ -0,0 +1,151 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package typeparams + +import ( + "go/ast" + "go/types" +) + +// IndexListExpr is an alias for ast.IndexListExpr. +type IndexListExpr = ast.IndexListExpr + +// ForTypeSpec returns n.TypeParams. 
+func ForTypeSpec(n *ast.TypeSpec) *ast.FieldList { + if n == nil { + return nil + } + return n.TypeParams +} + +// ForFuncType returns n.TypeParams. +func ForFuncType(n *ast.FuncType) *ast.FieldList { + if n == nil { + return nil + } + return n.TypeParams +} + +// TypeParam is an alias for types.TypeParam +type TypeParam = types.TypeParam + +// TypeParamList is an alias for types.TypeParamList +type TypeParamList = types.TypeParamList + +// TypeList is an alias for types.TypeList +type TypeList = types.TypeList + +// NewTypeParam calls types.NewTypeParam. +func NewTypeParam(name *types.TypeName, constraint types.Type) *TypeParam { + return types.NewTypeParam(name, constraint) +} + +// SetTypeParamConstraint calls tparam.SetConstraint(constraint). +func SetTypeParamConstraint(tparam *TypeParam, constraint types.Type) { + tparam.SetConstraint(constraint) +} + +// NewSignatureType calls types.NewSignatureType. +func NewSignatureType(recv *types.Var, recvTypeParams, typeParams []*TypeParam, params, results *types.Tuple, variadic bool) *types.Signature { + return types.NewSignatureType(recv, recvTypeParams, typeParams, params, results, variadic) +} + +// ForSignature returns sig.TypeParams() +func ForSignature(sig *types.Signature) *TypeParamList { + return sig.TypeParams() +} + +// RecvTypeParams returns sig.RecvTypeParams(). +func RecvTypeParams(sig *types.Signature) *TypeParamList { + return sig.RecvTypeParams() +} + +// IsComparable calls iface.IsComparable(). +func IsComparable(iface *types.Interface) bool { + return iface.IsComparable() +} + +// IsMethodSet calls iface.IsMethodSet(). +func IsMethodSet(iface *types.Interface) bool { + return iface.IsMethodSet() +} + +// IsImplicit calls iface.IsImplicit(). +func IsImplicit(iface *types.Interface) bool { + return iface.IsImplicit() +} + +// MarkImplicit calls iface.MarkImplicit(). +func MarkImplicit(iface *types.Interface) { + iface.MarkImplicit() +} + +// ForNamed extracts the (possibly empty) type parameter object list from +// named. +func ForNamed(named *types.Named) *TypeParamList { + return named.TypeParams() +} + +// SetForNamed sets the type params tparams on n. Each tparam must be of +// dynamic type *types.TypeParam. +func SetForNamed(n *types.Named, tparams []*TypeParam) { + n.SetTypeParams(tparams) +} + +// NamedTypeArgs returns named.TypeArgs(). +func NamedTypeArgs(named *types.Named) *TypeList { + return named.TypeArgs() +} + +// NamedTypeOrigin returns named.Orig(). +func NamedTypeOrigin(named *types.Named) types.Type { + return named.Origin() +} + +// Term is an alias for types.Term. +type Term = types.Term + +// NewTerm calls types.NewTerm. +func NewTerm(tilde bool, typ types.Type) *Term { + return types.NewTerm(tilde, typ) +} + +// Union is an alias for types.Union +type Union = types.Union + +// NewUnion calls types.NewUnion. +func NewUnion(terms []*Term) *Union { + return types.NewUnion(terms) +} + +// InitInstanceInfo initializes info to record information about type and +// function instances. +func InitInstanceInfo(info *types.Info) { + info.Instances = make(map[*ast.Ident]types.Instance) +} + +// Instance is an alias for types.Instance. +type Instance = types.Instance + +// GetInstances returns info.Instances. +func GetInstances(info *types.Info) map[*ast.Ident]Instance { + return info.Instances +} + +// Context is an alias for types.Context. +type Context = types.Context + +// NewContext calls types.NewContext. +func NewContext() *Context { + return types.NewContext() +} + +// Instantiate calls types.Instantiate. 
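Editor's aside on how the go1.18 aliases above are exercised: internal/typeparams cannot be imported from outside x/tools, so this hedged sketch uses go/ast and go/types directly on Go 1.18+ to show the underlying accessors these helpers alias (the syntactic TypeParams fields behind ForTypeSpec/ForFuncType, and the type-checked TypeParams() behind ForNamed). The source text and names in the snippet are illustrative only.

```go
package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p

type Pair[K comparable, V any] struct {
	Key K
	Val V
}

func Map[T, U any](xs []T, f func(T) U) []U {
	out := make([]U, 0, len(xs))
	for _, x := range xs {
		out = append(out, f(x))
	}
	return out
}
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}

	// Syntactic view: on go1.18+ these fields are what ForTypeSpec and
	// ForFuncType return.
	ast.Inspect(f, func(n ast.Node) bool {
		switch d := n.(type) {
		case *ast.FuncDecl:
			if d.Type.TypeParams != nil {
				fmt.Println("func", d.Name.Name, "has", len(d.Type.TypeParams.List), "type param group(s)")
			}
		case *ast.TypeSpec:
			if d.TypeParams != nil {
				fmt.Println("type", d.Name.Name, "has", len(d.TypeParams.List), "type param group(s)")
			}
		}
		return true
	})

	// Type-checked view: on go1.18+ this is what ForNamed aliases.
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	pair := pkg.Scope().Lookup("Pair").Type().(*types.Named)
	fmt.Println("Pair type params:", pair.TypeParams().Len()) // 2
}
```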
+func Instantiate(ctxt *Context, typ types.Type, targs []types.Type, validate bool) (types.Type, error) { + return types.Instantiate(ctxt, typ, targs, validate) +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go new file mode 100644 index 0000000000000..7ddee28d98754 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go @@ -0,0 +1,170 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by copytermlist.go DO NOT EDIT. + +package typeparams + +import "go/types" + +// A term describes elementary type sets: +// +// ∅: (*term)(nil) == ∅ // set of no types (empty set) +// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse) +// T: &term{false, T} == {T} // set of type T +// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t +// +type term struct { + tilde bool // valid if typ != nil + typ types.Type +} + +func (x *term) String() string { + switch { + case x == nil: + return "∅" + case x.typ == nil: + return "𝓤" + case x.tilde: + return "~" + x.typ.String() + default: + return x.typ.String() + } +} + +// equal reports whether x and y represent the same type set. +func (x *term) equal(y *term) bool { + // easy cases + switch { + case x == nil || y == nil: + return x == y + case x.typ == nil || y.typ == nil: + return x.typ == y.typ + } + // ∅ ⊂ x, y ⊂ 𝓤 + + return x.tilde == y.tilde && types.Identical(x.typ, y.typ) +} + +// union returns the union x ∪ y: zero, one, or two non-nil terms. +func (x *term) union(y *term) (_, _ *term) { + // easy cases + switch { + case x == nil && y == nil: + return nil, nil // ∅ ∪ ∅ == ∅ + case x == nil: + return y, nil // ∅ ∪ y == y + case y == nil: + return x, nil // x ∪ ∅ == x + case x.typ == nil: + return x, nil // 𝓤 ∪ y == 𝓤 + case y.typ == nil: + return y, nil // x ∪ 𝓤 == 𝓤 + } + // ∅ ⊂ x, y ⊂ 𝓤 + + if x.disjoint(y) { + return x, y // x ∪ y == (x, y) if x ∩ y == ∅ + } + // x.typ == y.typ + + // ~t ∪ ~t == ~t + // ~t ∪ T == ~t + // T ∪ ~t == ~t + // T ∪ T == T + if x.tilde || !y.tilde { + return x, nil + } + return y, nil +} + +// intersect returns the intersection x ∩ y. +func (x *term) intersect(y *term) *term { + // easy cases + switch { + case x == nil || y == nil: + return nil // ∅ ∩ y == ∅ and ∩ ∅ == ∅ + case x.typ == nil: + return y // 𝓤 ∩ y == y + case y.typ == nil: + return x // x ∩ 𝓤 == x + } + // ∅ ⊂ x, y ⊂ 𝓤 + + if x.disjoint(y) { + return nil // x ∩ y == ∅ if x ∩ y == ∅ + } + // x.typ == y.typ + + // ~t ∩ ~t == ~t + // ~t ∩ T == T + // T ∩ ~t == T + // T ∩ T == T + if !x.tilde || y.tilde { + return x + } + return y +} + +// includes reports whether t ∈ x. +func (x *term) includes(t types.Type) bool { + // easy cases + switch { + case x == nil: + return false // t ∈ ∅ == false + case x.typ == nil: + return true // t ∈ 𝓤 == true + } + // ∅ ⊂ x ⊂ 𝓤 + + u := t + if x.tilde { + u = under(u) + } + return types.Identical(x.typ, u) +} + +// subsetOf reports whether x ⊆ y. 
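To make the term algebra above easier to follow, here is a standalone, hedged re-implementation of the union case analysis (the vendored term and termlist types are unexported, so miniTerm and union are hypothetical names for this sketch; the disjointness check is simplified by comparing typ directly, which is equivalent for the basic types used here). It shows that ~int ∪ int collapses to ~int while int ∪ string stays as two disjoint terms.

```go
package main

import (
	"fmt"
	"go/types"
)

// miniTerm mirrors the vendored term: a nil pointer is ∅, typ == nil is 𝓤.
type miniTerm struct {
	tilde bool
	typ   types.Type
}

func (x *miniTerm) String() string {
	switch {
	case x == nil:
		return "∅"
	case x.typ == nil:
		return "𝓤"
	case x.tilde:
		return "~" + x.typ.String()
	default:
		return x.typ.String()
	}
}

// union follows the same case analysis as the vendored term.union:
// zero, one, or two non-nil terms.
func union(x, y *miniTerm) (*miniTerm, *miniTerm) {
	switch {
	case x == nil:
		return y, nil // ∅ ∪ y == y (covers ∅ ∪ ∅ as well)
	case y == nil:
		return x, nil // x ∪ ∅ == x
	case x.typ == nil:
		return x, nil // 𝓤 ∪ y == 𝓤
	case y.typ == nil:
		return y, nil // x ∪ 𝓤 == 𝓤
	}
	if !types.Identical(x.typ, y.typ) {
		return x, y // disjoint terms are kept separate (simplified check)
	}
	// ~t ∪ ~t == ~t, ~t ∪ T == ~t, T ∪ ~t == ~t, T ∪ T == T
	if x.tilde || !y.tilde {
		return x, nil
	}
	return y, nil
}

func main() {
	intT := types.Typ[types.Int]
	strT := types.Typ[types.String]

	a, b := union(&miniTerm{true, intT}, &miniTerm{false, intT})
	fmt.Println(a, b) // prints: ~int ∅   (the union collapses to ~int)

	a, b = union(&miniTerm{false, intT}, &miniTerm{false, strT})
	fmt.Println(a, b) // prints: int string   (disjoint terms stay separate)
}
```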
+func (x *term) subsetOf(y *term) bool { + // easy cases + switch { + case x == nil: + return true // ∅ ⊆ y == true + case y == nil: + return false // x ⊆ ∅ == false since x != ∅ + case y.typ == nil: + return true // x ⊆ 𝓤 == true + case x.typ == nil: + return false // 𝓤 ⊆ y == false since y != 𝓤 + } + // ∅ ⊂ x, y ⊂ 𝓤 + + if x.disjoint(y) { + return false // x ⊆ y == false if x ∩ y == ∅ + } + // x.typ == y.typ + + // ~t ⊆ ~t == true + // ~t ⊆ T == false + // T ⊆ ~t == true + // T ⊆ T == true + return !x.tilde || y.tilde +} + +// disjoint reports whether x ∩ y == ∅. +// x.typ and y.typ must not be nil. +func (x *term) disjoint(y *term) bool { + if debug && (x.typ == nil || y.typ == nil) { + panic("invalid argument(s)") + } + ux := x.typ + if y.tilde { + ux = under(ux) + } + uy := y.typ + if x.tilde { + uy = under(uy) + } + return !types.Identical(ux, uy) +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go new file mode 100644 index 0000000000000..07484073a57d1 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go @@ -0,0 +1,1560 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +//go:generate stringer -type=ErrorCode + +type ErrorCode int + +// This file defines the error codes that can be produced during type-checking. +// Collectively, these codes provide an identifier that may be used to +// implement special handling for certain types of errors. +// +// Error codes should be fine-grained enough that the exact nature of the error +// can be easily determined, but coarse enough that they are not an +// implementation detail of the type checking algorithm. As a rule-of-thumb, +// errors should be considered equivalent if there is a theoretical refactoring +// of the type checker in which they are emitted in exactly one place. For +// example, the type checker emits different error messages for "too many +// arguments" and "too few arguments", but one can imagine an alternative type +// checker where this check instead just emits a single "wrong number of +// arguments", so these errors should have the same code. +// +// Error code names should be as brief as possible while retaining accuracy and +// distinctiveness. In most cases names should start with an adjective +// describing the nature of the error (e.g. "invalid", "unused", "misplaced"), +// and end with a noun identifying the relevant language object. For example, +// "DuplicateDecl" or "InvalidSliceExpr". For brevity, naming follows the +// convention that "bad" implies a problem with syntax, and "invalid" implies a +// problem with types. + +const ( + // InvalidSyntaxTree occurs if an invalid syntax tree is provided + // to the type checker. It should never happen. + InvalidSyntaxTree ErrorCode = -1 +) + +const ( + _ ErrorCode = iota + + // Test is reserved for errors that only apply while in self-test mode. + Test + + /* package names */ + + // BlankPkgName occurs when a package name is the blank identifier "_". + // + // Per the spec: + // "The PackageName must not be the blank identifier." + BlankPkgName + + // MismatchedPkgName occurs when a file's package name doesn't match the + // package name already established by other files. + MismatchedPkgName + + // InvalidPkgUse occurs when a package identifier is used outside of a + // selector expression. 
+ // + // Example: + // import "fmt" + // + // var _ = fmt + InvalidPkgUse + + /* imports */ + + // BadImportPath occurs when an import path is not valid. + BadImportPath + + // BrokenImport occurs when importing a package fails. + // + // Example: + // import "amissingpackage" + BrokenImport + + // ImportCRenamed occurs when the special import "C" is renamed. "C" is a + // pseudo-package, and must not be renamed. + // + // Example: + // import _ "C" + ImportCRenamed + + // UnusedImport occurs when an import is unused. + // + // Example: + // import "fmt" + // + // func main() {} + UnusedImport + + /* initialization */ + + // InvalidInitCycle occurs when an invalid cycle is detected within the + // initialization graph. + // + // Example: + // var x int = f() + // + // func f() int { return x } + InvalidInitCycle + + /* decls */ + + // DuplicateDecl occurs when an identifier is declared multiple times. + // + // Example: + // var x = 1 + // var x = 2 + DuplicateDecl + + // InvalidDeclCycle occurs when a declaration cycle is not valid. + // + // Example: + // import "unsafe" + // + // type T struct { + // a [n]int + // } + // + // var n = unsafe.Sizeof(T{}) + InvalidDeclCycle + + // InvalidTypeCycle occurs when a cycle in type definitions results in a + // type that is not well-defined. + // + // Example: + // import "unsafe" + // + // type T [unsafe.Sizeof(T{})]int + InvalidTypeCycle + + /* decls > const */ + + // InvalidConstInit occurs when a const declaration has a non-constant + // initializer. + // + // Example: + // var x int + // const _ = x + InvalidConstInit + + // InvalidConstVal occurs when a const value cannot be converted to its + // target type. + // + // TODO(findleyr): this error code and example are not very clear. Consider + // removing it. + // + // Example: + // const _ = 1 << "hello" + InvalidConstVal + + // InvalidConstType occurs when the underlying type in a const declaration + // is not a valid constant type. + // + // Example: + // const c *int = 4 + InvalidConstType + + /* decls > var (+ other variable assignment codes) */ + + // UntypedNilUse occurs when the predeclared (untyped) value nil is used to + // initialize a variable declared without an explicit type. + // + // Example: + // var x = nil + UntypedNilUse + + // WrongAssignCount occurs when the number of values on the right-hand side + // of an assignment or or initialization expression does not match the number + // of variables on the left-hand side. + // + // Example: + // var x = 1, 2 + WrongAssignCount + + // UnassignableOperand occurs when the left-hand side of an assignment is + // not assignable. + // + // Example: + // func f() { + // const c = 1 + // c = 2 + // } + UnassignableOperand + + // NoNewVar occurs when a short variable declaration (':=') does not declare + // new variables. + // + // Example: + // func f() { + // x := 1 + // x := 2 + // } + NoNewVar + + // MultiValAssignOp occurs when an assignment operation (+=, *=, etc) does + // not have single-valued left-hand or right-hand side. + // + // Per the spec: + // "In assignment operations, both the left- and right-hand expression lists + // must contain exactly one single-valued expression" + // + // Example: + // func f() int { + // x, y := 1, 2 + // x, y += 1 + // return x + y + // } + MultiValAssignOp + + // InvalidIfaceAssign occurs when a value of type T is used as an + // interface, but T does not implement a method of the expected interface. 
+ // + // Example: + // type I interface { + // f() + // } + // + // type T int + // + // var x I = T(1) + InvalidIfaceAssign + + // InvalidChanAssign occurs when a chan assignment is invalid. + // + // Per the spec, a value x is assignable to a channel type T if: + // "x is a bidirectional channel value, T is a channel type, x's type V and + // T have identical element types, and at least one of V or T is not a + // defined type." + // + // Example: + // type T1 chan int + // type T2 chan int + // + // var x T1 + // // Invalid assignment because both types are named + // var _ T2 = x + InvalidChanAssign + + // IncompatibleAssign occurs when the type of the right-hand side expression + // in an assignment cannot be assigned to the type of the variable being + // assigned. + // + // Example: + // var x []int + // var _ int = x + IncompatibleAssign + + // UnaddressableFieldAssign occurs when trying to assign to a struct field + // in a map value. + // + // Example: + // func f() { + // m := make(map[string]struct{i int}) + // m["foo"].i = 42 + // } + UnaddressableFieldAssign + + /* decls > type (+ other type expression codes) */ + + // NotAType occurs when the identifier used as the underlying type in a type + // declaration or the right-hand side of a type alias does not denote a type. + // + // Example: + // var S = 2 + // + // type T S + NotAType + + // InvalidArrayLen occurs when an array length is not a constant value. + // + // Example: + // var n = 3 + // var _ = [n]int{} + InvalidArrayLen + + // BlankIfaceMethod occurs when a method name is '_'. + // + // Per the spec: + // "The name of each explicitly specified method must be unique and not + // blank." + // + // Example: + // type T interface { + // _(int) + // } + BlankIfaceMethod + + // IncomparableMapKey occurs when a map key type does not support the == and + // != operators. + // + // Per the spec: + // "The comparison operators == and != must be fully defined for operands of + // the key type; thus the key type must not be a function, map, or slice." + // + // Example: + // var x map[T]int + // + // type T []int + IncomparableMapKey + + // InvalidIfaceEmbed occurs when a non-interface type is embedded in an + // interface. + // + // Example: + // type T struct {} + // + // func (T) m() + // + // type I interface { + // T + // } + InvalidIfaceEmbed + + // InvalidPtrEmbed occurs when an embedded field is of the pointer form *T, + // and T itself is itself a pointer, an unsafe.Pointer, or an interface. + // + // Per the spec: + // "An embedded field must be specified as a type name T or as a pointer to + // a non-interface type name *T, and T itself may not be a pointer type." + // + // Example: + // type T *int + // + // type S struct { + // *T + // } + InvalidPtrEmbed + + /* decls > func and method */ + + // BadRecv occurs when a method declaration does not have exactly one + // receiver parameter. + // + // Example: + // func () _() {} + BadRecv + + // InvalidRecv occurs when a receiver type expression is not of the form T + // or *T, or T is a pointer type. + // + // Example: + // type T struct {} + // + // func (**T) m() {} + InvalidRecv + + // DuplicateFieldAndMethod occurs when an identifier appears as both a field + // and method name. + // + // Example: + // type T struct { + // m int + // } + // + // func (T) m() {} + DuplicateFieldAndMethod + + // DuplicateMethod occurs when two methods on the same receiver type have + // the same name. 
+ // + // Example: + // type T struct {} + // func (T) m() {} + // func (T) m(i int) int { return i } + DuplicateMethod + + /* decls > special */ + + // InvalidBlank occurs when a blank identifier is used as a value or type. + // + // Per the spec: + // "The blank identifier may appear as an operand only on the left-hand side + // of an assignment." + // + // Example: + // var x = _ + InvalidBlank + + // InvalidIota occurs when the predeclared identifier iota is used outside + // of a constant declaration. + // + // Example: + // var x = iota + InvalidIota + + // MissingInitBody occurs when an init function is missing its body. + // + // Example: + // func init() + MissingInitBody + + // InvalidInitSig occurs when an init function declares parameters or + // results. + // + // Example: + // func init() int { return 1 } + InvalidInitSig + + // InvalidInitDecl occurs when init is declared as anything other than a + // function. + // + // Example: + // var init = 1 + InvalidInitDecl + + // InvalidMainDecl occurs when main is declared as anything other than a + // function, in a main package. + InvalidMainDecl + + /* exprs */ + + // TooManyValues occurs when a function returns too many values for the + // expression context in which it is used. + // + // Example: + // func ReturnTwo() (int, int) { + // return 1, 2 + // } + // + // var x = ReturnTwo() + TooManyValues + + // NotAnExpr occurs when a type expression is used where a value expression + // is expected. + // + // Example: + // type T struct {} + // + // func f() { + // T + // } + NotAnExpr + + /* exprs > const */ + + // TruncatedFloat occurs when a float constant is truncated to an integer + // value. + // + // Example: + // var _ int = 98.6 + TruncatedFloat + + // NumericOverflow occurs when a numeric constant overflows its target type. + // + // Example: + // var x int8 = 1000 + NumericOverflow + + /* exprs > operation */ + + // UndefinedOp occurs when an operator is not defined for the type(s) used + // in an operation. + // + // Example: + // var c = "a" - "b" + UndefinedOp + + // MismatchedTypes occurs when operand types are incompatible in a binary + // operation. + // + // Example: + // var a = "hello" + // var b = 1 + // var c = a - b + MismatchedTypes + + // DivByZero occurs when a division operation is provable at compile + // time to be a division by zero. + // + // Example: + // const divisor = 0 + // var x int = 1/divisor + DivByZero + + // NonNumericIncDec occurs when an increment or decrement operator is + // applied to a non-numeric value. + // + // Example: + // func f() { + // var c = "c" + // c++ + // } + NonNumericIncDec + + /* exprs > ptr */ + + // UnaddressableOperand occurs when the & operator is applied to an + // unaddressable expression. + // + // Example: + // var x = &1 + UnaddressableOperand + + // InvalidIndirection occurs when a non-pointer value is indirected via the + // '*' operator. + // + // Example: + // var x int + // var y = *x + InvalidIndirection + + /* exprs > [] */ + + // NonIndexableOperand occurs when an index operation is applied to a value + // that cannot be indexed. + // + // Example: + // var x = 1 + // var y = x[1] + NonIndexableOperand + + // InvalidIndex occurs when an index argument is not of integer type, + // negative, or out-of-bounds. 
+ // + // Example: + // var s = [...]int{1,2,3} + // var x = s[5] + // + // Example: + // var s = []int{1,2,3} + // var _ = s[-1] + // + // Example: + // var s = []int{1,2,3} + // var i string + // var _ = s[i] + InvalidIndex + + // SwappedSliceIndices occurs when constant indices in a slice expression + // are decreasing in value. + // + // Example: + // var _ = []int{1,2,3}[2:1] + SwappedSliceIndices + + /* operators > slice */ + + // NonSliceableOperand occurs when a slice operation is applied to a value + // whose type is not sliceable, or is unaddressable. + // + // Example: + // var x = [...]int{1, 2, 3}[:1] + // + // Example: + // var x = 1 + // var y = 1[:1] + NonSliceableOperand + + // InvalidSliceExpr occurs when a three-index slice expression (a[x:y:z]) is + // applied to a string. + // + // Example: + // var s = "hello" + // var x = s[1:2:3] + InvalidSliceExpr + + /* exprs > shift */ + + // InvalidShiftCount occurs when the right-hand side of a shift operation is + // either non-integer, negative, or too large. + // + // Example: + // var ( + // x string + // y int = 1 << x + // ) + InvalidShiftCount + + // InvalidShiftOperand occurs when the shifted operand is not an integer. + // + // Example: + // var s = "hello" + // var x = s << 2 + InvalidShiftOperand + + /* exprs > chan */ + + // InvalidReceive occurs when there is a channel receive from a value that + // is either not a channel, or is a send-only channel. + // + // Example: + // func f() { + // var x = 1 + // <-x + // } + InvalidReceive + + // InvalidSend occurs when there is a channel send to a value that is not a + // channel, or is a receive-only channel. + // + // Example: + // func f() { + // var x = 1 + // x <- "hello!" + // } + InvalidSend + + /* exprs > literal */ + + // DuplicateLitKey occurs when an index is duplicated in a slice, array, or + // map literal. + // + // Example: + // var _ = []int{0:1, 0:2} + // + // Example: + // var _ = map[string]int{"a": 1, "a": 2} + DuplicateLitKey + + // MissingLitKey occurs when a map literal is missing a key expression. + // + // Example: + // var _ = map[string]int{1} + MissingLitKey + + // InvalidLitIndex occurs when the key in a key-value element of a slice or + // array literal is not an integer constant. + // + // Example: + // var i = 0 + // var x = []string{i: "world"} + InvalidLitIndex + + // OversizeArrayLit occurs when an array literal exceeds its length. + // + // Example: + // var _ = [2]int{1,2,3} + OversizeArrayLit + + // MixedStructLit occurs when a struct literal contains a mix of positional + // and named elements. + // + // Example: + // var _ = struct{i, j int}{i: 1, 2} + MixedStructLit + + // InvalidStructLit occurs when a positional struct literal has an incorrect + // number of values. + // + // Example: + // var _ = struct{i, j int}{1,2,3} + InvalidStructLit + + // MissingLitField occurs when a struct literal refers to a field that does + // not exist on the struct type. + // + // Example: + // var _ = struct{i int}{j: 2} + MissingLitField + + // DuplicateLitField occurs when a struct literal contains duplicated + // fields. + // + // Example: + // var _ = struct{i int}{i: 1, i: 2} + DuplicateLitField + + // UnexportedLitField occurs when a positional struct literal implicitly + // assigns an unexported field of an imported type. + UnexportedLitField + + // InvalidLitField occurs when a field name is not a valid identifier. 
+ // + // Example: + // var _ = struct{i int}{1: 1} + InvalidLitField + + // UntypedLit occurs when a composite literal omits a required type + // identifier. + // + // Example: + // type outer struct{ + // inner struct { i int } + // } + // + // var _ = outer{inner: {1}} + UntypedLit + + // InvalidLit occurs when a composite literal expression does not match its + // type. + // + // Example: + // type P *struct{ + // x int + // } + // var _ = P {} + InvalidLit + + /* exprs > selector */ + + // AmbiguousSelector occurs when a selector is ambiguous. + // + // Example: + // type E1 struct { i int } + // type E2 struct { i int } + // type T struct { E1; E2 } + // + // var x T + // var _ = x.i + AmbiguousSelector + + // UndeclaredImportedName occurs when a package-qualified identifier is + // undeclared by the imported package. + // + // Example: + // import "go/types" + // + // var _ = types.NotAnActualIdentifier + UndeclaredImportedName + + // UnexportedName occurs when a selector refers to an unexported identifier + // of an imported package. + // + // Example: + // import "reflect" + // + // type _ reflect.flag + UnexportedName + + // UndeclaredName occurs when an identifier is not declared in the current + // scope. + // + // Example: + // var x T + UndeclaredName + + // MissingFieldOrMethod occurs when a selector references a field or method + // that does not exist. + // + // Example: + // type T struct {} + // + // var x = T{}.f + MissingFieldOrMethod + + /* exprs > ... */ + + // BadDotDotDotSyntax occurs when a "..." occurs in a context where it is + // not valid. + // + // Example: + // var _ = map[int][...]int{0: {}} + BadDotDotDotSyntax + + // NonVariadicDotDotDot occurs when a "..." is used on the final argument to + // a non-variadic function. + // + // Example: + // func printArgs(s []string) { + // for _, a := range s { + // println(a) + // } + // } + // + // func f() { + // s := []string{"a", "b", "c"} + // printArgs(s...) + // } + NonVariadicDotDotDot + + // MisplacedDotDotDot occurs when a "..." is used somewhere other than the + // final argument to a function call. + // + // Example: + // func printArgs(args ...int) { + // for _, a := range args { + // println(a) + // } + // } + // + // func f() { + // a := []int{1,2,3} + // printArgs(0, a...) + // } + MisplacedDotDotDot + + // InvalidDotDotDotOperand occurs when a "..." operator is applied to a + // single-valued operand. + // + // Example: + // func printArgs(args ...int) { + // for _, a := range args { + // println(a) + // } + // } + // + // func f() { + // a := 1 + // printArgs(a...) + // } + // + // Example: + // func args() (int, int) { + // return 1, 2 + // } + // + // func printArgs(args ...int) { + // for _, a := range args { + // println(a) + // } + // } + // + // func g() { + // printArgs(args()...) + // } + InvalidDotDotDotOperand + + // InvalidDotDotDot occurs when a "..." is used in a non-variadic built-in + // function. + // + // Example: + // var s = []int{1, 2, 3} + // var l = len(s...) + InvalidDotDotDot + + /* exprs > built-in */ + + // UncalledBuiltin occurs when a built-in function is used as a + // function-valued expression, instead of being called. + // + // Per the spec: + // "The built-in functions do not have standard Go types, so they can only + // appear in call expressions; they cannot be used as function values." + // + // Example: + // var _ = copy + UncalledBuiltin + + // InvalidAppend occurs when append is called with a first argument that is + // not a slice. 
+ // + // Example: + // var _ = append(1, 2) + InvalidAppend + + // InvalidCap occurs when an argument to the cap built-in function is not of + // supported type. + // + // See https://golang.org/ref/spec#Lengthand_capacity for information on + // which underlying types are supported as arguments to cap and len. + // + // Example: + // var s = 2 + // var x = cap(s) + InvalidCap + + // InvalidClose occurs when close(...) is called with an argument that is + // not of channel type, or that is a receive-only channel. + // + // Example: + // func f() { + // var x int + // close(x) + // } + InvalidClose + + // InvalidCopy occurs when the arguments are not of slice type or do not + // have compatible type. + // + // See https://golang.org/ref/spec#Appendingand_copying_slices for more + // information on the type requirements for the copy built-in. + // + // Example: + // func f() { + // var x []int + // y := []int64{1,2,3} + // copy(x, y) + // } + InvalidCopy + + // InvalidComplex occurs when the complex built-in function is called with + // arguments with incompatible types. + // + // Example: + // var _ = complex(float32(1), float64(2)) + InvalidComplex + + // InvalidDelete occurs when the delete built-in function is called with a + // first argument that is not a map. + // + // Example: + // func f() { + // m := "hello" + // delete(m, "e") + // } + InvalidDelete + + // InvalidImag occurs when the imag built-in function is called with an + // argument that does not have complex type. + // + // Example: + // var _ = imag(int(1)) + InvalidImag + + // InvalidLen occurs when an argument to the len built-in function is not of + // supported type. + // + // See https://golang.org/ref/spec#Lengthand_capacity for information on + // which underlying types are supported as arguments to cap and len. + // + // Example: + // var s = 2 + // var x = len(s) + InvalidLen + + // SwappedMakeArgs occurs when make is called with three arguments, and its + // length argument is larger than its capacity argument. + // + // Example: + // var x = make([]int, 3, 2) + SwappedMakeArgs + + // InvalidMake occurs when make is called with an unsupported type argument. + // + // See https://golang.org/ref/spec#Makingslices_maps_and_channels for + // information on the types that may be created using make. + // + // Example: + // var x = make(int) + InvalidMake + + // InvalidReal occurs when the real built-in function is called with an + // argument that does not have complex type. + // + // Example: + // var _ = real(int(1)) + InvalidReal + + /* exprs > assertion */ + + // InvalidAssert occurs when a type assertion is applied to a + // value that is not of interface type. + // + // Example: + // var x = 1 + // var _ = x.(float64) + InvalidAssert + + // ImpossibleAssert occurs for a type assertion x.(T) when the value x of + // interface cannot have dynamic type T, due to a missing or mismatching + // method on T. + // + // Example: + // type T int + // + // func (t *T) m() int { return int(*t) } + // + // type I interface { m() int } + // + // var x I + // var _ = x.(T) + ImpossibleAssert + + /* exprs > conversion */ + + // InvalidConversion occurs when the argument type cannot be converted to the + // target. + // + // See https://golang.org/ref/spec#Conversions for the rules of + // convertibility. 
+ // + // Example: + // var x float64 + // var _ = string(x) + InvalidConversion + + // InvalidUntypedConversion occurs when an there is no valid implicit + // conversion from an untyped value satisfying the type constraints of the + // context in which it is used. + // + // Example: + // var _ = 1 + "" + InvalidUntypedConversion + + /* offsetof */ + + // BadOffsetofSyntax occurs when unsafe.Offsetof is called with an argument + // that is not a selector expression. + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.Offsetof(x) + BadOffsetofSyntax + + // InvalidOffsetof occurs when unsafe.Offsetof is called with a method + // selector, rather than a field selector, or when the field is embedded via + // a pointer. + // + // Per the spec: + // + // "If f is an embedded field, it must be reachable without pointer + // indirections through fields of the struct. " + // + // Example: + // import "unsafe" + // + // type T struct { f int } + // type S struct { *T } + // var s S + // var _ = unsafe.Offsetof(s.f) + // + // Example: + // import "unsafe" + // + // type S struct{} + // + // func (S) m() {} + // + // var s S + // var _ = unsafe.Offsetof(s.m) + InvalidOffsetof + + /* control flow > scope */ + + // UnusedExpr occurs when a side-effect free expression is used as a + // statement. Such a statement has no effect. + // + // Example: + // func f(i int) { + // i*i + // } + UnusedExpr + + // UnusedVar occurs when a variable is declared but unused. + // + // Example: + // func f() { + // x := 1 + // } + UnusedVar + + // MissingReturn occurs when a function with results is missing a return + // statement. + // + // Example: + // func f() int {} + MissingReturn + + // WrongResultCount occurs when a return statement returns an incorrect + // number of values. + // + // Example: + // func ReturnOne() int { + // return 1, 2 + // } + WrongResultCount + + // OutOfScopeResult occurs when the name of a value implicitly returned by + // an empty return statement is shadowed in a nested scope. + // + // Example: + // func factor(n int) (i int) { + // for i := 2; i < n; i++ { + // if n%i == 0 { + // return + // } + // } + // return 0 + // } + OutOfScopeResult + + /* control flow > if */ + + // InvalidCond occurs when an if condition is not a boolean expression. + // + // Example: + // func checkReturn(i int) { + // if i { + // panic("non-zero return") + // } + // } + InvalidCond + + /* control flow > for */ + + // InvalidPostDecl occurs when there is a declaration in a for-loop post + // statement. + // + // Example: + // func f() { + // for i := 0; i < 10; j := 0 {} + // } + InvalidPostDecl + + // InvalidChanRange occurs when a send-only channel used in a range + // expression. + // + // Example: + // func sum(c chan<- int) { + // s := 0 + // for i := range c { + // s += i + // } + // } + InvalidChanRange + + // InvalidIterVar occurs when two iteration variables are used while ranging + // over a channel. + // + // Example: + // func f(c chan int) { + // for k, v := range c { + // println(k, v) + // } + // } + InvalidIterVar + + // InvalidRangeExpr occurs when the type of a range expression is not array, + // slice, string, map, or channel. + // + // Example: + // func f(i int) { + // for j := range i { + // println(j) + // } + // } + InvalidRangeExpr + + /* control flow > switch */ + + // MisplacedBreak occurs when a break statement is not within a for, switch, + // or select statement of the innermost function definition. 
+ // + // Example: + // func f() { + // break + // } + MisplacedBreak + + // MisplacedContinue occurs when a continue statement is not within a for + // loop of the innermost function definition. + // + // Example: + // func sumeven(n int) int { + // proceed := func() { + // continue + // } + // sum := 0 + // for i := 1; i <= n; i++ { + // if i % 2 != 0 { + // proceed() + // } + // sum += i + // } + // return sum + // } + MisplacedContinue + + // MisplacedFallthrough occurs when a fallthrough statement is not within an + // expression switch. + // + // Example: + // func typename(i interface{}) string { + // switch i.(type) { + // case int64: + // fallthrough + // case int: + // return "int" + // } + // return "unsupported" + // } + MisplacedFallthrough + + // DuplicateCase occurs when a type or expression switch has duplicate + // cases. + // + // Example: + // func printInt(i int) { + // switch i { + // case 1: + // println("one") + // case 1: + // println("One") + // } + // } + DuplicateCase + + // DuplicateDefault occurs when a type or expression switch has multiple + // default clauses. + // + // Example: + // func printInt(i int) { + // switch i { + // case 1: + // println("one") + // default: + // println("One") + // default: + // println("1") + // } + // } + DuplicateDefault + + // BadTypeKeyword occurs when a .(type) expression is used anywhere other + // than a type switch. + // + // Example: + // type I interface { + // m() + // } + // var t I + // var _ = t.(type) + BadTypeKeyword + + // InvalidTypeSwitch occurs when .(type) is used on an expression that is + // not of interface type. + // + // Example: + // func f(i int) { + // switch x := i.(type) {} + // } + InvalidTypeSwitch + + // InvalidExprSwitch occurs when a switch expression is not comparable. + // + // Example: + // func _() { + // var a struct{ _ func() } + // switch a /* ERROR cannot switch on a */ { + // } + // } + InvalidExprSwitch + + /* control flow > select */ + + // InvalidSelectCase occurs when a select case is not a channel send or + // receive. + // + // Example: + // func checkChan(c <-chan int) bool { + // select { + // case c: + // return true + // default: + // return false + // } + // } + InvalidSelectCase + + /* control flow > labels and jumps */ + + // UndeclaredLabel occurs when an undeclared label is jumped to. + // + // Example: + // func f() { + // goto L + // } + UndeclaredLabel + + // DuplicateLabel occurs when a label is declared more than once. + // + // Example: + // func f() int { + // L: + // L: + // return 1 + // } + DuplicateLabel + + // MisplacedLabel occurs when a break or continue label is not on a for, + // switch, or select statement. + // + // Example: + // func f() { + // L: + // a := []int{1,2,3} + // for _, e := range a { + // if e > 10 { + // break L + // } + // println(a) + // } + // } + MisplacedLabel + + // UnusedLabel occurs when a label is declared but not used. + // + // Example: + // func f() { + // L: + // } + UnusedLabel + + // JumpOverDecl occurs when a label jumps over a variable declaration. + // + // Example: + // func f() int { + // goto L + // x := 2 + // L: + // x++ + // return x + // } + JumpOverDecl + + // JumpIntoBlock occurs when a forward jump goes to a label inside a nested + // block. + // + // Example: + // func f(x int) { + // goto L + // if x > 0 { + // L: + // print("inside block") + // } + // } + JumpIntoBlock + + /* control flow > calls */ + + // InvalidMethodExpr occurs when a pointer method is called but the argument + // is not addressable. 
+ // + // Example: + // type T struct {} + // + // func (*T) m() int { return 1 } + // + // var _ = T.m(T{}) + InvalidMethodExpr + + // WrongArgCount occurs when too few or too many arguments are passed by a + // function call. + // + // Example: + // func f(i int) {} + // var x = f() + WrongArgCount + + // InvalidCall occurs when an expression is called that is not of function + // type. + // + // Example: + // var x = "x" + // var y = x() + InvalidCall + + /* control flow > suspended */ + + // UnusedResults occurs when a restricted expression-only built-in function + // is suspended via go or defer. Such a suspension discards the results of + // these side-effect free built-in functions, and therefore is ineffectual. + // + // Example: + // func f(a []int) int { + // defer len(a) + // return i + // } + UnusedResults + + // InvalidDefer occurs when a deferred expression is not a function call, + // for example if the expression is a type conversion. + // + // Example: + // func f(i int) int { + // defer int32(i) + // return i + // } + InvalidDefer + + // InvalidGo occurs when a go expression is not a function call, for example + // if the expression is a type conversion. + // + // Example: + // func f(i int) int { + // go int32(i) + // return i + // } + InvalidGo + + // All codes below were added in Go 1.17. + + /* decl */ + + // BadDecl occurs when a declaration has invalid syntax. + BadDecl + + // RepeatedDecl occurs when an identifier occurs more than once on the left + // hand side of a short variable declaration. + // + // Example: + // func _() { + // x, y, y := 1, 2, 3 + // } + RepeatedDecl + + /* unsafe */ + + // InvalidUnsafeAdd occurs when unsafe.Add is called with a + // length argument that is not of integer type. + // + // Example: + // import "unsafe" + // + // var p unsafe.Pointer + // var _ = unsafe.Add(p, float64(1)) + InvalidUnsafeAdd + + // InvalidUnsafeSlice occurs when unsafe.Slice is called with a + // pointer argument that is not of pointer type or a length argument + // that is not of integer type, negative, or out of bounds. + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.Slice(x, 1) + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.Slice(&x, float64(1)) + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.Slice(&x, -1) + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.Slice(&x, uint64(1) << 63) + InvalidUnsafeSlice + + // All codes below were added in Go 1.18. + + /* features */ + + // UnsupportedFeature occurs when a language feature is used that is not + // supported at this Go version. + UnsupportedFeature + + /* type params */ + + // NotAGenericType occurs when a non-generic type is used where a generic + // type is expected: in type or function instantiation. + // + // Example: + // type T int + // + // var _ T[int] + NotAGenericType + + // WrongTypeArgCount occurs when a type or function is instantiated with an + // incorrent number of type arguments, including when a generic type or + // function is used without instantiation. + // + // Errors inolving failed type inference are assigned other error codes. + // + // Example: + // type T[p any] int + // + // var _ T[int, string] + // + // Example: + // func f[T any]() {} + // + // var x = f + WrongTypeArgCount + + // CannotInferTypeArgs occurs when type or function type argument inference + // fails to infer all type arguments. 
+ // + // Example: + // func f[T any]() {} + // + // func _() { + // f() + // } + // + // Example: + // type N[P, Q any] struct{} + // + // var _ N[int] + CannotInferTypeArgs + + // InvalidTypeArg occurs when a type argument does not satisfy its + // corresponding type parameter constraints. + // + // Example: + // type T[P ~int] struct{} + // + // var _ T[string] + InvalidTypeArg // arguments? InferenceFailed + + // InvalidInstanceCycle occurs when an invalid cycle is detected + // within the instantiation graph. + // + // Example: + // func f[T any]() { f[*T]() } + InvalidInstanceCycle + + // InvalidUnion occurs when an embedded union or approximation element is + // not valid. + // + // Example: + // type _ interface { + // ~int | interface{ m() } + // } + InvalidUnion + + // MisplacedConstraintIface occurs when a constraint-type interface is used + // outside of constraint position. + // + // Example: + // type I interface { ~int } + // + // var _ I + MisplacedConstraintIface + + // InvalidMethodTypeParams occurs when methods have type parameters. + // + // It cannot be encountered with an AST parsed using go/parser. + InvalidMethodTypeParams + + // MisplacedTypeParam occurs when a type parameter is used in a place where + // it is not permitted. + // + // Example: + // type T[P any] P + // + // Example: + // type T[P any] struct{ *P } + MisplacedTypeParam + + // InvalidUnsafeSliceData occurs when unsafe.SliceData is called with + // an argument that is not of slice type. It also occurs if it is used + // in a package compiled for a language version before go1.20. + // + // Example: + // import "unsafe" + // + // var x int + // var _ = unsafe.SliceData(x) + InvalidUnsafeSliceData + + // InvalidUnsafeString occurs when unsafe.String is called with + // a length argument that is not of integer type, negative, or + // out of bounds. It also occurs if it is used in a package + // compiled for a language version before go1.20. + // + // Example: + // import "unsafe" + // + // var b [10]byte + // var _ = unsafe.String(&b[0], -1) + InvalidUnsafeString + + // InvalidUnsafeStringData occurs if it is used in a package + // compiled for a language version before go1.20. + _ // not used anymore + +) diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go new file mode 100644 index 0000000000000..15ecf7c5ded91 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode_string.go @@ -0,0 +1,179 @@ +// Code generated by "stringer -type=ErrorCode"; DO NOT EDIT. + +package typesinternal + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. 
+ var x [1]struct{} + _ = x[InvalidSyntaxTree - -1] + _ = x[Test-1] + _ = x[BlankPkgName-2] + _ = x[MismatchedPkgName-3] + _ = x[InvalidPkgUse-4] + _ = x[BadImportPath-5] + _ = x[BrokenImport-6] + _ = x[ImportCRenamed-7] + _ = x[UnusedImport-8] + _ = x[InvalidInitCycle-9] + _ = x[DuplicateDecl-10] + _ = x[InvalidDeclCycle-11] + _ = x[InvalidTypeCycle-12] + _ = x[InvalidConstInit-13] + _ = x[InvalidConstVal-14] + _ = x[InvalidConstType-15] + _ = x[UntypedNilUse-16] + _ = x[WrongAssignCount-17] + _ = x[UnassignableOperand-18] + _ = x[NoNewVar-19] + _ = x[MultiValAssignOp-20] + _ = x[InvalidIfaceAssign-21] + _ = x[InvalidChanAssign-22] + _ = x[IncompatibleAssign-23] + _ = x[UnaddressableFieldAssign-24] + _ = x[NotAType-25] + _ = x[InvalidArrayLen-26] + _ = x[BlankIfaceMethod-27] + _ = x[IncomparableMapKey-28] + _ = x[InvalidIfaceEmbed-29] + _ = x[InvalidPtrEmbed-30] + _ = x[BadRecv-31] + _ = x[InvalidRecv-32] + _ = x[DuplicateFieldAndMethod-33] + _ = x[DuplicateMethod-34] + _ = x[InvalidBlank-35] + _ = x[InvalidIota-36] + _ = x[MissingInitBody-37] + _ = x[InvalidInitSig-38] + _ = x[InvalidInitDecl-39] + _ = x[InvalidMainDecl-40] + _ = x[TooManyValues-41] + _ = x[NotAnExpr-42] + _ = x[TruncatedFloat-43] + _ = x[NumericOverflow-44] + _ = x[UndefinedOp-45] + _ = x[MismatchedTypes-46] + _ = x[DivByZero-47] + _ = x[NonNumericIncDec-48] + _ = x[UnaddressableOperand-49] + _ = x[InvalidIndirection-50] + _ = x[NonIndexableOperand-51] + _ = x[InvalidIndex-52] + _ = x[SwappedSliceIndices-53] + _ = x[NonSliceableOperand-54] + _ = x[InvalidSliceExpr-55] + _ = x[InvalidShiftCount-56] + _ = x[InvalidShiftOperand-57] + _ = x[InvalidReceive-58] + _ = x[InvalidSend-59] + _ = x[DuplicateLitKey-60] + _ = x[MissingLitKey-61] + _ = x[InvalidLitIndex-62] + _ = x[OversizeArrayLit-63] + _ = x[MixedStructLit-64] + _ = x[InvalidStructLit-65] + _ = x[MissingLitField-66] + _ = x[DuplicateLitField-67] + _ = x[UnexportedLitField-68] + _ = x[InvalidLitField-69] + _ = x[UntypedLit-70] + _ = x[InvalidLit-71] + _ = x[AmbiguousSelector-72] + _ = x[UndeclaredImportedName-73] + _ = x[UnexportedName-74] + _ = x[UndeclaredName-75] + _ = x[MissingFieldOrMethod-76] + _ = x[BadDotDotDotSyntax-77] + _ = x[NonVariadicDotDotDot-78] + _ = x[MisplacedDotDotDot-79] + _ = x[InvalidDotDotDotOperand-80] + _ = x[InvalidDotDotDot-81] + _ = x[UncalledBuiltin-82] + _ = x[InvalidAppend-83] + _ = x[InvalidCap-84] + _ = x[InvalidClose-85] + _ = x[InvalidCopy-86] + _ = x[InvalidComplex-87] + _ = x[InvalidDelete-88] + _ = x[InvalidImag-89] + _ = x[InvalidLen-90] + _ = x[SwappedMakeArgs-91] + _ = x[InvalidMake-92] + _ = x[InvalidReal-93] + _ = x[InvalidAssert-94] + _ = x[ImpossibleAssert-95] + _ = x[InvalidConversion-96] + _ = x[InvalidUntypedConversion-97] + _ = x[BadOffsetofSyntax-98] + _ = x[InvalidOffsetof-99] + _ = x[UnusedExpr-100] + _ = x[UnusedVar-101] + _ = x[MissingReturn-102] + _ = x[WrongResultCount-103] + _ = x[OutOfScopeResult-104] + _ = x[InvalidCond-105] + _ = x[InvalidPostDecl-106] + _ = x[InvalidChanRange-107] + _ = x[InvalidIterVar-108] + _ = x[InvalidRangeExpr-109] + _ = x[MisplacedBreak-110] + _ = x[MisplacedContinue-111] + _ = x[MisplacedFallthrough-112] + _ = x[DuplicateCase-113] + _ = x[DuplicateDefault-114] + _ = x[BadTypeKeyword-115] + _ = x[InvalidTypeSwitch-116] + _ = x[InvalidExprSwitch-117] + _ = x[InvalidSelectCase-118] + _ = x[UndeclaredLabel-119] + _ = x[DuplicateLabel-120] + _ = x[MisplacedLabel-121] + _ = x[UnusedLabel-122] + _ = x[JumpOverDecl-123] + _ = x[JumpIntoBlock-124] + _ = x[InvalidMethodExpr-125] + _ = 
x[WrongArgCount-126] + _ = x[InvalidCall-127] + _ = x[UnusedResults-128] + _ = x[InvalidDefer-129] + _ = x[InvalidGo-130] + _ = x[BadDecl-131] + _ = x[RepeatedDecl-132] + _ = x[InvalidUnsafeAdd-133] + _ = x[InvalidUnsafeSlice-134] + _ = x[UnsupportedFeature-135] + _ = x[NotAGenericType-136] + _ = x[WrongTypeArgCount-137] + _ = x[CannotInferTypeArgs-138] + _ = x[InvalidTypeArg-139] + _ = x[InvalidInstanceCycle-140] + _ = x[InvalidUnion-141] + _ = x[MisplacedConstraintIface-142] + _ = x[InvalidMethodTypeParams-143] + _ = x[MisplacedTypeParam-144] + _ = x[InvalidUnsafeSliceData-145] + _ = x[InvalidUnsafeString-146] +} + +const ( + _ErrorCode_name_0 = "InvalidSyntaxTree" + _ErrorCode_name_1 = "TestBlankPkgNameMismatchedPkgNameInvalidPkgUseBadImportPathBrokenImportImportCRenamedUnusedImportInvalidInitCycleDuplicateDeclInvalidDeclCycleInvalidTypeCycleInvalidConstInitInvalidConstValInvalidConstTypeUntypedNilUseWrongAssignCountUnassignableOperandNoNewVarMultiValAssignOpInvalidIfaceAssignInvalidChanAssignIncompatibleAssignUnaddressableFieldAssignNotATypeInvalidArrayLenBlankIfaceMethodIncomparableMapKeyInvalidIfaceEmbedInvalidPtrEmbedBadRecvInvalidRecvDuplicateFieldAndMethodDuplicateMethodInvalidBlankInvalidIotaMissingInitBodyInvalidInitSigInvalidInitDeclInvalidMainDeclTooManyValuesNotAnExprTruncatedFloatNumericOverflowUndefinedOpMismatchedTypesDivByZeroNonNumericIncDecUnaddressableOperandInvalidIndirectionNonIndexableOperandInvalidIndexSwappedSliceIndicesNonSliceableOperandInvalidSliceExprInvalidShiftCountInvalidShiftOperandInvalidReceiveInvalidSendDuplicateLitKeyMissingLitKeyInvalidLitIndexOversizeArrayLitMixedStructLitInvalidStructLitMissingLitFieldDuplicateLitFieldUnexportedLitFieldInvalidLitFieldUntypedLitInvalidLitAmbiguousSelectorUndeclaredImportedNameUnexportedNameUndeclaredNameMissingFieldOrMethodBadDotDotDotSyntaxNonVariadicDotDotDotMisplacedDotDotDotInvalidDotDotDotOperandInvalidDotDotDotUncalledBuiltinInvalidAppendInvalidCapInvalidCloseInvalidCopyInvalidComplexInvalidDeleteInvalidImagInvalidLenSwappedMakeArgsInvalidMakeInvalidRealInvalidAssertImpossibleAssertInvalidConversionInvalidUntypedConversionBadOffsetofSyntaxInvalidOffsetofUnusedExprUnusedVarMissingReturnWrongResultCountOutOfScopeResultInvalidCondInvalidPostDeclInvalidChanRangeInvalidIterVarInvalidRangeExprMisplacedBreakMisplacedContinueMisplacedFallthroughDuplicateCaseDuplicateDefaultBadTypeKeywordInvalidTypeSwitchInvalidExprSwitchInvalidSelectCaseUndeclaredLabelDuplicateLabelMisplacedLabelUnusedLabelJumpOverDeclJumpIntoBlockInvalidMethodExprWrongArgCountInvalidCallUnusedResultsInvalidDeferInvalidGoBadDeclRepeatedDeclInvalidUnsafeAddInvalidUnsafeSliceUnsupportedFeatureNotAGenericTypeWrongTypeArgCountCannotInferTypeArgsInvalidTypeArgInvalidInstanceCycleInvalidUnionMisplacedConstraintIfaceInvalidMethodTypeParamsMisplacedTypeParamInvalidUnsafeSliceDataInvalidUnsafeString" +) + +var ( + _ErrorCode_index_1 = [...]uint16{0, 4, 16, 33, 46, 59, 71, 85, 97, 113, 126, 142, 158, 174, 189, 205, 218, 234, 253, 261, 277, 295, 312, 330, 354, 362, 377, 393, 411, 428, 443, 450, 461, 484, 499, 511, 522, 537, 551, 566, 581, 594, 603, 617, 632, 643, 658, 667, 683, 703, 721, 740, 752, 771, 790, 806, 823, 842, 856, 867, 882, 895, 910, 926, 940, 956, 971, 988, 1006, 1021, 1031, 1041, 1058, 1080, 1094, 1108, 1128, 1146, 1166, 1184, 1207, 1223, 1238, 1251, 1261, 1273, 1284, 1298, 1311, 1322, 1332, 1347, 1358, 1369, 1382, 1398, 1415, 1439, 1456, 1471, 1481, 1490, 1503, 1519, 1535, 1546, 1561, 1577, 1591, 1607, 1621, 1638, 1658, 1671, 1687, 1701, 1718, 
1735, 1752, 1767, 1781, 1795, 1806, 1818, 1831, 1848, 1861, 1872, 1885, 1897, 1906, 1913, 1925, 1941, 1959, 1977, 1992, 2009, 2028, 2042, 2062, 2074, 2098, 2121, 2139, 2161, 2180} +) + +func (i ErrorCode) String() string { + switch { + case i == -1: + return _ErrorCode_name_0 + case 1 <= i && i <= 146: + i -= 1 + return _ErrorCode_name_1[_ErrorCode_index_1[i]:_ErrorCode_index_1[i+1]] + default: + return "ErrorCode(" + strconv.FormatInt(int64(i), 10) + ")" + } +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go new file mode 100644 index 0000000000000..ce7d4351b2203 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -0,0 +1,52 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typesinternal provides access to internal go/types APIs that are not +// yet exported. +package typesinternal + +import ( + "go/token" + "go/types" + "reflect" + "unsafe" +) + +func SetUsesCgo(conf *types.Config) bool { + v := reflect.ValueOf(conf).Elem() + + f := v.FieldByName("go115UsesCgo") + if !f.IsValid() { + f = v.FieldByName("UsesCgo") + if !f.IsValid() { + return false + } + } + + addr := unsafe.Pointer(f.UnsafeAddr()) + *(*bool)(addr) = true + + return true +} + +// ReadGo116ErrorData extracts additional information from types.Error values +// generated by Go version 1.16 and later: the error code, start position, and +// end position. If all positions are valid, start <= err.Pos <= end. +// +// If the data could not be read, the final result parameter will be false. +func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) { + var data [3]int + // By coincidence all of these fields are ints, which simplifies things. + v := reflect.ValueOf(err) + for i, name := range []string{"go116code", "go116start", "go116end"} { + f := v.FieldByName(name) + if !f.IsValid() { + return 0, 0, 0, false + } + data[i] = int(f.Int()) + } + return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true +} + +var SetGoVersion = func(conf *types.Config, version string) bool { return false } diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types_118.go b/vendor/golang.org/x/tools/internal/typesinternal/types_118.go new file mode 100644 index 0000000000000..a42b072a67d3b --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/types_118.go @@ -0,0 +1,19 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.18 +// +build go1.18 + +package typesinternal + +import ( + "go/types" +) + +func init() { + SetGoVersion = func(conf *types.Config, version string) bool { + conf.GoVersion = version + return true + } +} diff --git a/vendor/google.golang.org/api/googleapi/googleapi.go b/vendor/google.golang.org/api/googleapi/googleapi.go index 75248fd16ec1c..b328a7976ab54 100644 --- a/vendor/google.golang.org/api/googleapi/googleapi.go +++ b/vendor/google.golang.org/api/googleapi/googleapi.go @@ -79,6 +79,9 @@ type Error struct { Header http.Header Errors []ErrorItem + // err is typically a wrapped apierror.APIError, see + // google-api-go-client/internal/gensupport/error.go. + err error } // ErrorItem is a detailed error code & message from the Google API frontend. 
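The typesinternal helpers vendored above reach into unexported go/types state via reflection: ReadGo116ErrorData recovers the numeric error code and the start/end positions from a types.Error on Go 1.16+, and SetGoVersion/SetUsesCgo poke fields on types.Config. As a hedged illustration of where such a types.Error value comes from, the sketch below type-checks a deliberately broken snippet and collects the errors; it assumes it runs inside golang.org/x/tools (the package is internal to that module), so the actual ReadGo116ErrorData call is only indicated in a comment.

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	const src = `package p
var x int = "oops" // intentional type error
`
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{
		Importer: importer.Default(),
		// Collect errors instead of stopping at the first one.
		Error: func(err error) {
			if terr, ok := err.(types.Error); ok {
				// Inside x/tools this is where typesinternal.ReadGo116ErrorData(terr)
				// would return the ErrorCode plus start/end token.Pos values.
				fmt.Printf("%s: %s\n", fset.Position(terr.Pos), terr.Msg)
			}
		},
	}
	_, _ = conf.Check("p", fset, []*ast.File{f}, nil)
}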
@@ -122,6 +125,15 @@ func (e *Error) Error() string { return buf.String() } +// Wrap allows an existing Error to wrap another error. See also [Error.Unwrap]. +func (e *Error) Wrap(err error) { + e.err = err +} + +func (e *Error) Unwrap() error { + return e.err +} + type errorReply struct { Error *Error `json:"error"` } @@ -174,8 +186,9 @@ func CheckMediaResponse(res *http.Response) error { } slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20)) return &Error{ - Code: res.StatusCode, - Body: string(slurp), + Code: res.StatusCode, + Body: string(slurp), + Header: res.Header, } } diff --git a/vendor/google.golang.org/api/googleapi/transport/apikey.go b/vendor/google.golang.org/api/googleapi/transport/apikey.go new file mode 100644 index 0000000000000..f5d826c2a19d5 --- /dev/null +++ b/vendor/google.golang.org/api/googleapi/transport/apikey.go @@ -0,0 +1,44 @@ +// Copyright 2012 Google LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package transport contains HTTP transports used to make +// authenticated API requests. +// +// This package is DEPRECATED. Users should instead use, +// +// service, err := NewService(..., option.WithAPIKey(...)) +package transport + +import ( + "errors" + "net/http" +) + +// APIKey is an HTTP Transport which wraps an underlying transport and +// appends an API Key "key" parameter to the URL of outgoing requests. +// +// Deprecated: please use NewService(..., option.WithAPIKey(...)) instead. +type APIKey struct { + // Key is the API Key to set on requests. + Key string + + // Transport is the underlying HTTP transport. + // If nil, http.DefaultTransport is used. + Transport http.RoundTripper +} + +func (t *APIKey) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.Transport + if rt == nil { + rt = http.DefaultTransport + if rt == nil { + return nil, errors.New("googleapi/transport: no Transport specified or available") + } + } + newReq := *req + args := newReq.URL.Query() + args.Set("key", t.Key) + newReq.URL.RawQuery = args.Encode() + return rt.RoundTrip(&newReq) +} diff --git a/vendor/google.golang.org/api/internal/version.go b/vendor/google.golang.org/api/internal/version.go index b5a685e7053f6..f135505662141 100644 --- a/vendor/google.golang.org/api/internal/version.go +++ b/vendor/google.golang.org/api/internal/version.go @@ -5,4 +5,4 @@ package internal // Version is the current tagged release of the library. -const Version = "0.93.0" +const Version = "0.110.0" diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go index 343a5a965ebc9..cc7ebfe277bf6 100644 --- a/vendor/google.golang.org/api/option/internaloption/internaloption.go +++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go @@ -134,3 +134,10 @@ type withCreds google.Credentials func (w *withCreds) Apply(o *internal.DialSettings) { o.InternalCredentials = (*google.Credentials)(w) } + +// EmbeddableAdapter is a no-op option.ClientOption that allow libraries to +// create their own client options by embedding this type into their own +// client-specific option wrapper. See example for usage. 
+type EmbeddableAdapter struct{} + +func (*EmbeddableAdapter) Apply(_ *internal.DialSettings) {} diff --git a/vendor/google.golang.org/api/option/option.go b/vendor/google.golang.org/api/option/option.go index 27ba9eab01809..b2085a1949abf 100644 --- a/vendor/google.golang.org/api/option/option.go +++ b/vendor/google.golang.org/api/option/option.go @@ -82,6 +82,9 @@ func (w withEndpoint) Apply(o *internal.DialSettings) { // WithScopes returns a ClientOption that overrides the default OAuth2 scopes // to be used for a service. +// +// If both WithScopes and WithTokenSource are used, scope settings from the +// token source will be used instead. func WithScopes(scope ...string) ClientOption { return withScopes(scope) } @@ -93,7 +96,9 @@ func (w withScopes) Apply(o *internal.DialSettings) { copy(o.Scopes, w) } -// WithUserAgent returns a ClientOption that sets the User-Agent. +// WithUserAgent returns a ClientOption that sets the User-Agent. This option +// is incompatible with the [WithHTTPClient] option. If you wish to provide a +// custom client you will need to add this header via RoundTripper middleware. func WithUserAgent(ua string) ClientOption { return withUA(ua) } @@ -305,9 +310,9 @@ func (w withClientCertSource) Apply(o *internal.DialSettings) { // // This is an EXPERIMENTAL API and may be changed or removed in the future. // -// This option has been replaced by `impersonate` package: +// Deprecated: This option has been replaced by `impersonate` package: // `google.golang.org/api/impersonate`. Please use the `impersonate` package -// instead. +// instead with the WithTokenSource option. func ImpersonateCredentials(target string, delegates ...string) ClientOption { return impersonateServiceAccount{ target: target, diff --git a/vendor/google.golang.org/api/transport/cert/enterprise_cert.go b/vendor/google.golang.org/api/transport/cert/enterprise_cert.go index eaa52e07c086b..1061b5f05f3b9 100644 --- a/vendor/google.golang.org/api/transport/cert/enterprise_cert.go +++ b/vendor/google.golang.org/api/transport/cert/enterprise_cert.go @@ -15,7 +15,6 @@ package cert import ( "crypto/tls" "errors" - "os" "github.com/googleapis/enterprise-certificate-proxy/client" ) @@ -36,8 +35,7 @@ type ecpSource struct { func NewEnterpriseCertificateProxySource(configFilePath string) (Source, error) { key, err := client.Cred(configFilePath) if err != nil { - if errors.Is(err, os.ErrNotExist) { - // Config file missing means Enterprise Certificate Proxy is not supported. + if errors.Is(err, client.ErrCredUnavailable) { return nil, errSourceUnavailable } return nil, err diff --git a/vendor/google.golang.org/api/transport/grpc/dial.go b/vendor/google.golang.org/api/transport/grpc/dial.go index c86f56507f514..efcc8e6c641f9 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial.go +++ b/vendor/google.golang.org/api/transport/grpc/dial.go @@ -25,6 +25,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials" grpcgoogle "google.golang.org/grpc/credentials/google" + grpcinsecure "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/credentials/oauth" // Install grpclb, which is required for direct path. 
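The EmbeddableAdapter added above exists because option.ClientOption.Apply takes the unexported-path internal.DialSettings type, so external client libraries cannot implement the interface directly; embedding the adapter gives their option types a no-op Apply. A minimal sketch of that pattern follows, under the assumption that the library filters its own options out of the slice before passing the rest on; the wrapper type withCustomRetries, its constructor, and the maxRetries helper are hypothetical names used for illustration only.

package myclient

import (
	"google.golang.org/api/option"
	"google.golang.org/api/option/internaloption"
)

// withCustomRetries is a client-specific option. Embedding EmbeddableAdapter
// supplies the no-op Apply method, so *withCustomRetries satisfies
// option.ClientOption without touching internal.DialSettings.
type withCustomRetries struct {
	internaloption.EmbeddableAdapter
	max int
}

// WithCustomRetries returns an option understood only by this client.
func WithCustomRetries(max int) option.ClientOption {
	return &withCustomRetries{max: max}
}

// maxRetries extracts the custom option, if present, before the remaining
// options are handed to the transport layer.
func maxRetries(opts []option.ClientOption) (int, bool) {
	for _, o := range opts {
		if r, ok := o.(*withCustomRetries); ok {
			return r.max, true
		}
	}
	return 0, false
}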
@@ -126,10 +127,26 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C if err != nil { return nil, err } - var grpcOpts []grpc.DialOption + + var transportCreds credentials.TransportCredentials if insecure { - grpcOpts = []grpc.DialOption{grpc.WithInsecure()} - } else if !o.NoAuth { + transportCreds = grpcinsecure.NewCredentials() + } else { + transportCreds = credentials.NewTLS(&tls.Config{ + GetClientCertificate: clientCertSource, + }) + } + + // Initialize gRPC dial options with transport-level security options. + grpcOpts := []grpc.DialOption{ + grpc.WithTransportCredentials(transportCreds), + } + + // Authentication can only be sent when communicating over a secure connection. + // + // TODO: Should we be more lenient in the future and allow sending credentials + // when dialing an insecure connection? + if !o.NoAuth && !insecure { if o.APIKey != "" { log.Print("API keys are not supported for gRPC APIs. Remove the WithAPIKey option from your client-creating call.") } @@ -142,8 +159,17 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C o.QuotaProject = internal.QuotaProjectFromCreds(creds) } + grpcOpts = append(grpcOpts, + grpc.WithPerRPCCredentials(grpcTokenSource{ + TokenSource: oauth.TokenSource{creds.TokenSource}, + quotaProject: o.QuotaProject, + requestReason: o.RequestReason, + }), + ) + // Attempt Direct Path: if isDirectPathEnabled(endpoint, o) && isTokenSourceDirectPathCompatible(creds.TokenSource, o) && metadata.OnGCE() { + // Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates. grpcOpts = []grpc.DialOption{ grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{oauth.TokenSource{creds.TokenSource}}))} if timeoutDialerOption != nil { @@ -153,9 +179,9 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C if strings.EqualFold(os.Getenv(enableDirectPathXds), "true") { // google-c2p resolver target must not have a port number if addr, _, err := net.SplitHostPort(endpoint); err == nil { - endpoint = "google-c2p-experimental:///" + addr + endpoint = "google-c2p:///" + addr } else { - endpoint = "google-c2p-experimental:///" + endpoint + endpoint = "google-c2p:///" + endpoint } } else { if !strings.HasPrefix(endpoint, "dns:///") { @@ -169,18 +195,6 @@ func dial(ctx context.Context, insecure bool, o *internal.DialSettings) (*grpc.C grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"grpclb":{"childPolicy":[{"pick_first":{}}]}}]}`)) } // TODO(cbro): add support for system parameters (quota project, request reason) via chained interceptor. 
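The hunk above reworks how transport security and per-RPC credentials are combined: insecure dials now use google.golang.org/grpc/credentials/insecure instead of the deprecated grpc.WithInsecure, transport credentials are always set first, and OAuth per-RPC credentials are attached only when the connection is secure. A rough, standalone equivalent of that option-building logic is sketched below, assuming the caller already has an oauth2.TokenSource and a *tls.Config; dialOptions is a hypothetical helper, not part of this package.

package example

import (
	"crypto/tls"

	"golang.org/x/oauth2"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/credentials/oauth"
)

// dialOptions mirrors the decision in the patch: transport credentials come
// first, and token-based per-RPC credentials are added only for secure dials.
func dialOptions(insecureConn bool, ts oauth2.TokenSource, tlsCfg *tls.Config) []grpc.DialOption {
	var creds credentials.TransportCredentials
	if insecureConn {
		creds = insecure.NewCredentials()
	} else {
		creds = credentials.NewTLS(tlsCfg)
	}
	opts := []grpc.DialOption{grpc.WithTransportCredentials(creds)}
	if !insecureConn && ts != nil {
		opts = append(opts, grpc.WithPerRPCCredentials(oauth.TokenSource{TokenSource: ts}))
	}
	return opts
}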
- } else { - tlsConfig := &tls.Config{ - GetClientCertificate: clientCertSource, - } - grpcOpts = []grpc.DialOption{ - grpc.WithPerRPCCredentials(grpcTokenSource{ - TokenSource: oauth.TokenSource{creds.TokenSource}, - quotaProject: o.QuotaProject, - requestReason: o.RequestReason, - }), - grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)), - } } } diff --git a/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go b/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go index 4bf9e82172398..507cd3ec63ab5 100644 --- a/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go +++ b/vendor/google.golang.org/api/transport/grpc/dial_socketopt.go @@ -12,7 +12,6 @@ import ( "net" "syscall" - "golang.org/x/sys/unix" "google.golang.org/grpc" ) @@ -20,6 +19,9 @@ const ( // defaultTCPUserTimeout is the default TCP_USER_TIMEOUT socket option. By // default is 20 seconds. tcpUserTimeoutMilliseconds = 20000 + + // Copied from golang.org/x/sys/unix.TCP_USER_TIMEOUT. + tcpUserTimeoutOp = 0x12 ) func init() { @@ -33,7 +35,7 @@ func dialTCPUserTimeout(ctx context.Context, addr string) (net.Conn, error) { var syscallErr error controlErr := c.Control(func(fd uintptr) { syscallErr = syscall.SetsockoptInt( - int(fd), syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, tcpUserTimeoutMilliseconds) + int(fd), syscall.IPPROTO_TCP, tcpUserTimeoutOp, tcpUserTimeoutMilliseconds) }) if syscallErr != nil { return syscallErr diff --git a/vendor/google.golang.org/api/transport/http/dial.go b/vendor/google.golang.org/api/transport/http/dial.go new file mode 100644 index 0000000000000..47568a4061d13 --- /dev/null +++ b/vendor/google.golang.org/api/transport/http/dial.go @@ -0,0 +1,233 @@ +// Copyright 2015 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package http supports network connections to HTTP servers. +// This package is not intended for use by end developers. Use the +// google.golang.org/api/option package to configure API clients. +package http + +import ( + "context" + "crypto/tls" + "errors" + "net" + "net/http" + "time" + + "go.opencensus.io/plugin/ochttp" + "golang.org/x/net/http2" + "golang.org/x/oauth2" + "google.golang.org/api/googleapi/transport" + "google.golang.org/api/internal" + "google.golang.org/api/option" + "google.golang.org/api/transport/cert" + "google.golang.org/api/transport/http/internal/propagation" + "google.golang.org/api/transport/internal/dca" +) + +// NewClient returns an HTTP client for use communicating with a Google cloud +// service, configured with the given ClientOptions. It also returns the endpoint +// for the service as specified in the options. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) { + settings, err := newSettings(opts) + if err != nil { + return nil, "", err + } + clientCertSource, endpoint, err := dca.GetClientCertificateSourceAndEndpoint(settings) + if err != nil { + return nil, "", err + } + // TODO(cbro): consider injecting the User-Agent even if an explicit HTTP client is provided? + if settings.HTTPClient != nil { + return settings.HTTPClient, endpoint, nil + } + trans, err := newTransport(ctx, defaultBaseTransport(ctx, clientCertSource), settings) + if err != nil { + return nil, "", err + } + return &http.Client{Transport: trans}, endpoint, nil +} + +// NewTransport creates an http.RoundTripper for use communicating with a Google +// cloud service, configured with the given ClientOptions. 
Its RoundTrip method delegates to base. +func NewTransport(ctx context.Context, base http.RoundTripper, opts ...option.ClientOption) (http.RoundTripper, error) { + settings, err := newSettings(opts) + if err != nil { + return nil, err + } + if settings.HTTPClient != nil { + return nil, errors.New("transport/http: WithHTTPClient passed to NewTransport") + } + return newTransport(ctx, base, settings) +} + +func newTransport(ctx context.Context, base http.RoundTripper, settings *internal.DialSettings) (http.RoundTripper, error) { + paramTransport := ¶meterTransport{ + base: base, + userAgent: settings.UserAgent, + quotaProject: settings.QuotaProject, + requestReason: settings.RequestReason, + } + var trans http.RoundTripper = paramTransport + trans = addOCTransport(trans, settings) + switch { + case settings.NoAuth: + // Do nothing. + case settings.APIKey != "": + trans = &transport.APIKey{ + Transport: trans, + Key: settings.APIKey, + } + default: + creds, err := internal.Creds(ctx, settings) + if err != nil { + return nil, err + } + if paramTransport.quotaProject == "" { + paramTransport.quotaProject = internal.QuotaProjectFromCreds(creds) + } + + ts := creds.TokenSource + if settings.ImpersonationConfig == nil && settings.TokenSource != nil { + ts = settings.TokenSource + } + trans = &oauth2.Transport{ + Base: trans, + Source: ts, + } + } + return trans, nil +} + +func newSettings(opts []option.ClientOption) (*internal.DialSettings, error) { + var o internal.DialSettings + for _, opt := range opts { + opt.Apply(&o) + } + if err := o.Validate(); err != nil { + return nil, err + } + if o.GRPCConn != nil { + return nil, errors.New("unsupported gRPC connection specified") + } + return &o, nil +} + +type parameterTransport struct { + userAgent string + quotaProject string + requestReason string + + base http.RoundTripper +} + +func (t *parameterTransport) RoundTrip(req *http.Request) (*http.Response, error) { + rt := t.base + if rt == nil { + return nil, errors.New("transport: no Transport specified") + } + newReq := *req + newReq.Header = make(http.Header) + for k, vv := range req.Header { + newReq.Header[k] = vv + } + if t.userAgent != "" { + // TODO(cbro): append to existing User-Agent header? + newReq.Header.Set("User-Agent", t.userAgent) + } + + // Attach system parameters into the header + if t.quotaProject != "" { + newReq.Header.Set("X-Goog-User-Project", t.quotaProject) + } + if t.requestReason != "" { + newReq.Header.Set("X-Goog-Request-Reason", t.requestReason) + } + + return rt.RoundTrip(&newReq) +} + +// Set at init time by dial_appengine.go. If nil, we're not on App Engine. +var appengineUrlfetchHook func(context.Context) http.RoundTripper + +// defaultBaseTransport returns the base HTTP transport. +// On App Engine, this is urlfetch.Transport. +// Otherwise, use a default transport, taking most defaults from +// http.DefaultTransport. +// If TLSCertificate is available, set TLSClientConfig as well. +func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source) http.RoundTripper { + if appengineUrlfetchHook != nil { + return appengineUrlfetchHook(ctx) + } + + // Copy http.DefaultTransport except for MaxIdleConnsPerHost setting, + // which is increased due to reported performance issues under load in the GCS + // client. Transport.Clone is only available in Go 1.13 and up. 
+ trans := clonedTransport(http.DefaultTransport) + if trans == nil { + trans = fallbackBaseTransport() + } + trans.MaxIdleConnsPerHost = 100 + + if clientCertSource != nil { + trans.TLSClientConfig = &tls.Config{ + GetClientCertificate: clientCertSource, + } + } + + configureHTTP2(trans) + + return trans +} + +// configureHTTP2 configures the ReadIdleTimeout HTTP/2 option for the +// transport. This allows broken idle connections to be pruned more quickly, +// preventing the client from attempting to re-use connections that will no +// longer work. +func configureHTTP2(trans *http.Transport) { + http2Trans, err := http2.ConfigureTransports(trans) + if err == nil { + http2Trans.ReadIdleTimeout = time.Second * 31 + } +} + +// fallbackBaseTransport is used in httpHeaderMaxSize { + return trace.SpanContext{}, false + } + + // Parse the trace id field. + slash := strings.Index(h, `/`) + if slash == -1 { + return trace.SpanContext{}, false + } + tid, h := h[:slash], h[slash+1:] + + buf, err := hex.DecodeString(tid) + if err != nil { + return trace.SpanContext{}, false + } + copy(sc.TraceID[:], buf) + + // Parse the span id field. + spanstr := h + semicolon := strings.Index(h, `;`) + if semicolon != -1 { + spanstr, h = h[:semicolon], h[semicolon+1:] + } + sid, err := strconv.ParseUint(spanstr, 10, 64) + if err != nil { + return trace.SpanContext{}, false + } + binary.BigEndian.PutUint64(sc.SpanID[:], sid) + + // Parse the options field, options field is optional. + if !strings.HasPrefix(h, "o=") { + return sc, true + } + o, err := strconv.ParseUint(h[2:], 10, 64) + if err != nil { + return trace.SpanContext{}, false + } + sc.TraceOptions = trace.TraceOptions(o) + return sc, true +} + +// SpanContextToRequest modifies the given request to include a Stackdriver Trace header. +func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { + sid := binary.BigEndian.Uint64(sc.SpanID[:]) + header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions)) + req.Header.Set(httpHeader, header) +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go index 66fdb650f4baa..4c91534d5a9db 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go @@ -15,17 +15,20 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/api/client.proto package annotations import ( reflect "reflect" + sync "sync" + api "google.golang.org/genproto/googleapis/api" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" descriptorpb "google.golang.org/protobuf/types/descriptorpb" + durationpb "google.golang.org/protobuf/types/known/durationpb" ) const ( @@ -35,6 +38,1050 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// The organization for which the client libraries are being published. +// Affects the url where generated docs are published, etc. +type ClientLibraryOrganization int32 + +const ( + // Not useful. + ClientLibraryOrganization_CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED ClientLibraryOrganization = 0 + // Google Cloud Platform Org. + ClientLibraryOrganization_CLOUD ClientLibraryOrganization = 1 + // Ads (Advertising) Org. 
+ ClientLibraryOrganization_ADS ClientLibraryOrganization = 2 + // Photos Org. + ClientLibraryOrganization_PHOTOS ClientLibraryOrganization = 3 + // Street View Org. + ClientLibraryOrganization_STREET_VIEW ClientLibraryOrganization = 4 +) + +// Enum value maps for ClientLibraryOrganization. +var ( + ClientLibraryOrganization_name = map[int32]string{ + 0: "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED", + 1: "CLOUD", + 2: "ADS", + 3: "PHOTOS", + 4: "STREET_VIEW", + } + ClientLibraryOrganization_value = map[string]int32{ + "CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED": 0, + "CLOUD": 1, + "ADS": 2, + "PHOTOS": 3, + "STREET_VIEW": 4, + } +) + +func (x ClientLibraryOrganization) Enum() *ClientLibraryOrganization { + p := new(ClientLibraryOrganization) + *p = x + return p +} + +func (x ClientLibraryOrganization) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ClientLibraryOrganization) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_client_proto_enumTypes[0].Descriptor() +} + +func (ClientLibraryOrganization) Type() protoreflect.EnumType { + return &file_google_api_client_proto_enumTypes[0] +} + +func (x ClientLibraryOrganization) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ClientLibraryOrganization.Descriptor instead. +func (ClientLibraryOrganization) EnumDescriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{0} +} + +// To where should client libraries be published? +type ClientLibraryDestination int32 + +const ( + // Client libraries will neither be generated nor published to package + // managers. + ClientLibraryDestination_CLIENT_LIBRARY_DESTINATION_UNSPECIFIED ClientLibraryDestination = 0 + // Generate the client library in a repo under github.com/googleapis, + // but don't publish it to package managers. + ClientLibraryDestination_GITHUB ClientLibraryDestination = 10 + // Publish the library to package managers like nuget.org and npmjs.com. + ClientLibraryDestination_PACKAGE_MANAGER ClientLibraryDestination = 20 +) + +// Enum value maps for ClientLibraryDestination. +var ( + ClientLibraryDestination_name = map[int32]string{ + 0: "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED", + 10: "GITHUB", + 20: "PACKAGE_MANAGER", + } + ClientLibraryDestination_value = map[string]int32{ + "CLIENT_LIBRARY_DESTINATION_UNSPECIFIED": 0, + "GITHUB": 10, + "PACKAGE_MANAGER": 20, + } +) + +func (x ClientLibraryDestination) Enum() *ClientLibraryDestination { + p := new(ClientLibraryDestination) + *p = x + return p +} + +func (x ClientLibraryDestination) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ClientLibraryDestination) Descriptor() protoreflect.EnumDescriptor { + return file_google_api_client_proto_enumTypes[1].Descriptor() +} + +func (ClientLibraryDestination) Type() protoreflect.EnumType { + return &file_google_api_client_proto_enumTypes[1] +} + +func (x ClientLibraryDestination) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ClientLibraryDestination.Descriptor instead. +func (ClientLibraryDestination) EnumDescriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{1} +} + +// Required information for every language. +type CommonLanguageSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Link to automatically generated reference documentation. 
Example: + // https://cloud.google.com/nodejs/docs/reference/asset/latest + // + // Deprecated: Do not use. + ReferenceDocsUri string `protobuf:"bytes,1,opt,name=reference_docs_uri,json=referenceDocsUri,proto3" json:"reference_docs_uri,omitempty"` + // The destination where API teams want this client library to be published. + Destinations []ClientLibraryDestination `protobuf:"varint,2,rep,packed,name=destinations,proto3,enum=google.api.ClientLibraryDestination" json:"destinations,omitempty"` +} + +func (x *CommonLanguageSettings) Reset() { + *x = CommonLanguageSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CommonLanguageSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommonLanguageSettings) ProtoMessage() {} + +func (x *CommonLanguageSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommonLanguageSettings.ProtoReflect.Descriptor instead. +func (*CommonLanguageSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{0} +} + +// Deprecated: Do not use. +func (x *CommonLanguageSettings) GetReferenceDocsUri() string { + if x != nil { + return x.ReferenceDocsUri + } + return "" +} + +func (x *CommonLanguageSettings) GetDestinations() []ClientLibraryDestination { + if x != nil { + return x.Destinations + } + return nil +} + +// Details about how and where to publish client libraries. +type ClientLibrarySettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Version of the API to apply these settings to. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Launch stage of this version of the API. + LaunchStage api.LaunchStage `protobuf:"varint,2,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"` + // When using transport=rest, the client request will encode enums as + // numbers rather than strings. + RestNumericEnums bool `protobuf:"varint,3,opt,name=rest_numeric_enums,json=restNumericEnums,proto3" json:"rest_numeric_enums,omitempty"` + // Settings for legacy Java features, supported in the Service YAML. + JavaSettings *JavaSettings `protobuf:"bytes,21,opt,name=java_settings,json=javaSettings,proto3" json:"java_settings,omitempty"` + // Settings for C++ client libraries. + CppSettings *CppSettings `protobuf:"bytes,22,opt,name=cpp_settings,json=cppSettings,proto3" json:"cpp_settings,omitempty"` + // Settings for PHP client libraries. + PhpSettings *PhpSettings `protobuf:"bytes,23,opt,name=php_settings,json=phpSettings,proto3" json:"php_settings,omitempty"` + // Settings for Python client libraries. + PythonSettings *PythonSettings `protobuf:"bytes,24,opt,name=python_settings,json=pythonSettings,proto3" json:"python_settings,omitempty"` + // Settings for Node client libraries. + NodeSettings *NodeSettings `protobuf:"bytes,25,opt,name=node_settings,json=nodeSettings,proto3" json:"node_settings,omitempty"` + // Settings for .NET client libraries. 
+ DotnetSettings *DotnetSettings `protobuf:"bytes,26,opt,name=dotnet_settings,json=dotnetSettings,proto3" json:"dotnet_settings,omitempty"` + // Settings for Ruby client libraries. + RubySettings *RubySettings `protobuf:"bytes,27,opt,name=ruby_settings,json=rubySettings,proto3" json:"ruby_settings,omitempty"` + // Settings for Go client libraries. + GoSettings *GoSettings `protobuf:"bytes,28,opt,name=go_settings,json=goSettings,proto3" json:"go_settings,omitempty"` +} + +func (x *ClientLibrarySettings) Reset() { + *x = ClientLibrarySettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ClientLibrarySettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ClientLibrarySettings) ProtoMessage() {} + +func (x *ClientLibrarySettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ClientLibrarySettings.ProtoReflect.Descriptor instead. +func (*ClientLibrarySettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{1} +} + +func (x *ClientLibrarySettings) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *ClientLibrarySettings) GetLaunchStage() api.LaunchStage { + if x != nil { + return x.LaunchStage + } + return api.LaunchStage_LAUNCH_STAGE_UNSPECIFIED +} + +func (x *ClientLibrarySettings) GetRestNumericEnums() bool { + if x != nil { + return x.RestNumericEnums + } + return false +} + +func (x *ClientLibrarySettings) GetJavaSettings() *JavaSettings { + if x != nil { + return x.JavaSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetCppSettings() *CppSettings { + if x != nil { + return x.CppSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetPhpSettings() *PhpSettings { + if x != nil { + return x.PhpSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetPythonSettings() *PythonSettings { + if x != nil { + return x.PythonSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetNodeSettings() *NodeSettings { + if x != nil { + return x.NodeSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetDotnetSettings() *DotnetSettings { + if x != nil { + return x.DotnetSettings + } + return nil +} + +func (x *ClientLibrarySettings) GetRubySettings() *RubySettings { + if x != nil { + return x.RubySettings + } + return nil +} + +func (x *ClientLibrarySettings) GetGoSettings() *GoSettings { + if x != nil { + return x.GoSettings + } + return nil +} + +// This message configures the settings for publishing [Google Cloud Client +// libraries](https://cloud.google.com/apis/docs/cloud-client-libraries) +// generated from the service config. +type Publishing struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A list of API method settings, e.g. the behavior for methods that use the + // long-running operation pattern. + MethodSettings []*MethodSettings `protobuf:"bytes,2,rep,name=method_settings,json=methodSettings,proto3" json:"method_settings,omitempty"` + // Link to a place that API users can report issues. 
Example: + // https://issuetracker.google.com/issues/new?component=190865&template=1161103 + NewIssueUri string `protobuf:"bytes,101,opt,name=new_issue_uri,json=newIssueUri,proto3" json:"new_issue_uri,omitempty"` + // Link to product home page. Example: + // https://cloud.google.com/asset-inventory/docs/overview + DocumentationUri string `protobuf:"bytes,102,opt,name=documentation_uri,json=documentationUri,proto3" json:"documentation_uri,omitempty"` + // Used as a tracking tag when collecting data about the APIs developer + // relations artifacts like docs, packages delivered to package managers, + // etc. Example: "speech". + ApiShortName string `protobuf:"bytes,103,opt,name=api_short_name,json=apiShortName,proto3" json:"api_short_name,omitempty"` + // GitHub label to apply to issues and pull requests opened for this API. + GithubLabel string `protobuf:"bytes,104,opt,name=github_label,json=githubLabel,proto3" json:"github_label,omitempty"` + // GitHub teams to be added to CODEOWNERS in the directory in GitHub + // containing source code for the client libraries for this API. + CodeownerGithubTeams []string `protobuf:"bytes,105,rep,name=codeowner_github_teams,json=codeownerGithubTeams,proto3" json:"codeowner_github_teams,omitempty"` + // A prefix used in sample code when demarking regions to be included in + // documentation. + DocTagPrefix string `protobuf:"bytes,106,opt,name=doc_tag_prefix,json=docTagPrefix,proto3" json:"doc_tag_prefix,omitempty"` + // For whom the client library is being published. + Organization ClientLibraryOrganization `protobuf:"varint,107,opt,name=organization,proto3,enum=google.api.ClientLibraryOrganization" json:"organization,omitempty"` + // Client library settings. If the same version string appears multiple + // times in this list, then the last one wins. Settings from earlier + // settings with the same version string are discarded. + LibrarySettings []*ClientLibrarySettings `protobuf:"bytes,109,rep,name=library_settings,json=librarySettings,proto3" json:"library_settings,omitempty"` +} + +func (x *Publishing) Reset() { + *x = Publishing{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Publishing) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Publishing) ProtoMessage() {} + +func (x *Publishing) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Publishing.ProtoReflect.Descriptor instead. 
+func (*Publishing) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{2} +} + +func (x *Publishing) GetMethodSettings() []*MethodSettings { + if x != nil { + return x.MethodSettings + } + return nil +} + +func (x *Publishing) GetNewIssueUri() string { + if x != nil { + return x.NewIssueUri + } + return "" +} + +func (x *Publishing) GetDocumentationUri() string { + if x != nil { + return x.DocumentationUri + } + return "" +} + +func (x *Publishing) GetApiShortName() string { + if x != nil { + return x.ApiShortName + } + return "" +} + +func (x *Publishing) GetGithubLabel() string { + if x != nil { + return x.GithubLabel + } + return "" +} + +func (x *Publishing) GetCodeownerGithubTeams() []string { + if x != nil { + return x.CodeownerGithubTeams + } + return nil +} + +func (x *Publishing) GetDocTagPrefix() string { + if x != nil { + return x.DocTagPrefix + } + return "" +} + +func (x *Publishing) GetOrganization() ClientLibraryOrganization { + if x != nil { + return x.Organization + } + return ClientLibraryOrganization_CLIENT_LIBRARY_ORGANIZATION_UNSPECIFIED +} + +func (x *Publishing) GetLibrarySettings() []*ClientLibrarySettings { + if x != nil { + return x.LibrarySettings + } + return nil +} + +// Settings for Java client libraries. +type JavaSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The package name to use in Java. Clobbers the java_package option + // set in the protobuf. This should be used **only** by APIs + // who have already set the language_settings.java.package_name" field + // in gapic.yaml. API teams should use the protobuf java_package option + // where possible. + // + // Example of a YAML configuration:: + // + // publishing: + // java_settings: + // library_package: com.google.cloud.pubsub.v1 + LibraryPackage string `protobuf:"bytes,1,opt,name=library_package,json=libraryPackage,proto3" json:"library_package,omitempty"` + // Configure the Java class name to use instead of the service's for its + // corresponding generated GAPIC client. Keys are fully-qualified + // service names as they appear in the protobuf (including the full + // the language_settings.java.interface_names" field in gapic.yaml. API + // teams should otherwise use the service name as it appears in the + // protobuf. + // + // Example of a YAML configuration:: + // + // publishing: + // java_settings: + // service_class_names: + // - google.pubsub.v1.Publisher: TopicAdmin + // - google.pubsub.v1.Subscriber: SubscriptionAdmin + ServiceClassNames map[string]string `protobuf:"bytes,2,rep,name=service_class_names,json=serviceClassNames,proto3" json:"service_class_names,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Some settings. 
+ Common *CommonLanguageSettings `protobuf:"bytes,3,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *JavaSettings) Reset() { + *x = JavaSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JavaSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JavaSettings) ProtoMessage() {} + +func (x *JavaSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JavaSettings.ProtoReflect.Descriptor instead. +func (*JavaSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{3} +} + +func (x *JavaSettings) GetLibraryPackage() string { + if x != nil { + return x.LibraryPackage + } + return "" +} + +func (x *JavaSettings) GetServiceClassNames() map[string]string { + if x != nil { + return x.ServiceClassNames + } + return nil +} + +func (x *JavaSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for C++ client libraries. +type CppSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *CppSettings) Reset() { + *x = CppSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CppSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CppSettings) ProtoMessage() {} + +func (x *CppSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CppSettings.ProtoReflect.Descriptor instead. +func (*CppSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{4} +} + +func (x *CppSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Php client libraries. +type PhpSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. 
+ Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *PhpSettings) Reset() { + *x = PhpSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PhpSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PhpSettings) ProtoMessage() {} + +func (x *PhpSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PhpSettings.ProtoReflect.Descriptor instead. +func (*PhpSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{5} +} + +func (x *PhpSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Python client libraries. +type PythonSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *PythonSettings) Reset() { + *x = PythonSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PythonSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PythonSettings) ProtoMessage() {} + +func (x *PythonSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PythonSettings.ProtoReflect.Descriptor instead. +func (*PythonSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{6} +} + +func (x *PythonSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Node client libraries. +type NodeSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *NodeSettings) Reset() { + *x = NodeSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NodeSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NodeSettings) ProtoMessage() {} + +func (x *NodeSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NodeSettings.ProtoReflect.Descriptor instead. 
+func (*NodeSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{7} +} + +func (x *NodeSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Dotnet client libraries. +type DotnetSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *DotnetSettings) Reset() { + *x = DotnetSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DotnetSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DotnetSettings) ProtoMessage() {} + +func (x *DotnetSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DotnetSettings.ProtoReflect.Descriptor instead. +func (*DotnetSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{8} +} + +func (x *DotnetSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Ruby client libraries. +type RubySettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. + Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *RubySettings) Reset() { + *x = RubySettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RubySettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RubySettings) ProtoMessage() {} + +func (x *RubySettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RubySettings.ProtoReflect.Descriptor instead. +func (*RubySettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{9} +} + +func (x *RubySettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Settings for Go client libraries. +type GoSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Some settings. 
+ Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *GoSettings) Reset() { + *x = GoSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GoSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GoSettings) ProtoMessage() {} + +func (x *GoSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GoSettings.ProtoReflect.Descriptor instead. +func (*GoSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{10} +} + +func (x *GoSettings) GetCommon() *CommonLanguageSettings { + if x != nil { + return x.Common + } + return nil +} + +// Describes the generator configuration for a method. +type MethodSettings struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The fully qualified name of the method, for which the options below apply. + // This is used to find the method to apply the options. + Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"` + // Describes settings to use for long-running operations when generating + // API methods for RPCs. Complements RPCs that use the annotations in + // google/longrunning/operations.proto. + // + // Example of a YAML configuration:: + // + // publishing: + // method_behavior: + // - selector: CreateAdDomain + // long_running: + // initial_poll_delay: + // seconds: 60 # 1 minute + // poll_delay_multiplier: 1.5 + // max_poll_delay: + // seconds: 360 # 6 minutes + // total_poll_timeout: + // seconds: 54000 # 90 minutes + LongRunning *MethodSettings_LongRunning `protobuf:"bytes,2,opt,name=long_running,json=longRunning,proto3" json:"long_running,omitempty"` +} + +func (x *MethodSettings) Reset() { + *x = MethodSettings{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodSettings) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodSettings) ProtoMessage() {} + +func (x *MethodSettings) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodSettings.ProtoReflect.Descriptor instead. +func (*MethodSettings) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{11} +} + +func (x *MethodSettings) GetSelector() string { + if x != nil { + return x.Selector + } + return "" +} + +func (x *MethodSettings) GetLongRunning() *MethodSettings_LongRunning { + if x != nil { + return x.LongRunning + } + return nil +} + +// Describes settings to use when generating API methods that use the +// long-running operation pattern. +// All default values below are from those used in the client library +// generators (e.g. 
+// [Java](https://github.com/googleapis/gapic-generator-java/blob/04c2faa191a9b5a10b92392fe8482279c4404803/src/main/java/com/google/api/generator/gapic/composer/common/RetrySettingsComposer.java)). +type MethodSettings_LongRunning struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Initial delay after which the first poll request will be made. + // Default value: 5 seconds. + InitialPollDelay *durationpb.Duration `protobuf:"bytes,1,opt,name=initial_poll_delay,json=initialPollDelay,proto3" json:"initial_poll_delay,omitempty"` + // Multiplier to gradually increase delay between subsequent polls until it + // reaches max_poll_delay. + // Default value: 1.5. + PollDelayMultiplier float32 `protobuf:"fixed32,2,opt,name=poll_delay_multiplier,json=pollDelayMultiplier,proto3" json:"poll_delay_multiplier,omitempty"` + // Maximum time between two subsequent poll requests. + // Default value: 45 seconds. + MaxPollDelay *durationpb.Duration `protobuf:"bytes,3,opt,name=max_poll_delay,json=maxPollDelay,proto3" json:"max_poll_delay,omitempty"` + // Total polling timeout. + // Default value: 5 minutes. + TotalPollTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=total_poll_timeout,json=totalPollTimeout,proto3" json:"total_poll_timeout,omitempty"` +} + +func (x *MethodSettings_LongRunning) Reset() { + *x = MethodSettings_LongRunning{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_client_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MethodSettings_LongRunning) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MethodSettings_LongRunning) ProtoMessage() {} + +func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message { + mi := &file_google_api_client_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MethodSettings_LongRunning.ProtoReflect.Descriptor instead. 
+func (*MethodSettings_LongRunning) Descriptor() ([]byte, []int) { + return file_google_api_client_proto_rawDescGZIP(), []int{11, 0} +} + +func (x *MethodSettings_LongRunning) GetInitialPollDelay() *durationpb.Duration { + if x != nil { + return x.InitialPollDelay + } + return nil +} + +func (x *MethodSettings_LongRunning) GetPollDelayMultiplier() float32 { + if x != nil { + return x.PollDelayMultiplier + } + return 0 +} + +func (x *MethodSettings_LongRunning) GetMaxPollDelay() *durationpb.Duration { + if x != nil { + return x.MaxPollDelay + } + return nil +} + +func (x *MethodSettings_LongRunning) GetTotalPollTimeout() *durationpb.Duration { + if x != nil { + return x.TotalPollTimeout + } + return nil +} + var file_google_api_client_proto_extTypes = []protoimpl.ExtensionInfo{ { ExtendedType: (*descriptorpb.MethodOptions)(nil), @@ -78,26 +1125,26 @@ var ( // // For example, the proto RPC and annotation: // - // rpc CreateSubscription(CreateSubscriptionRequest) - // returns (Subscription) { - // option (google.api.method_signature) = "name,topic"; - // } + // rpc CreateSubscription(CreateSubscriptionRequest) + // returns (Subscription) { + // option (google.api.method_signature) = "name,topic"; + // } // // Would add the following Java overload (in addition to the method accepting // the request object): // - // public final Subscription createSubscription(String name, String topic) + // public final Subscription createSubscription(String name, String topic) // // The following backwards-compatibility guidelines apply: // - // * Adding this annotation to an unannotated method is backwards + // - Adding this annotation to an unannotated method is backwards // compatible. - // * Adding this annotation to a method which already has existing + // - Adding this annotation to a method which already has existing // method signature annotations is backwards compatible if and only if // the new method signature annotation is last in the sequence. - // * Modifying or removing an existing method signature annotation is + // - Modifying or removing an existing method signature annotation is // a breaking change. - // * Re-ordering existing method signature annotations is a breaking + // - Re-ordering existing method signature annotations is a breaking // change. // // repeated string method_signature = 1051; @@ -111,10 +1158,10 @@ var ( // // Example: // - // service Foo { - // option (google.api.default_host) = "foo.googleapi.com"; - // ... - // } + // service Foo { + // option (google.api.default_host) = "foo.googleapi.com"; + // ... + // } // // optional string default_host = 1049; E_DefaultHost = &file_google_api_client_proto_extTypes[1] @@ -122,22 +1169,22 @@ var ( // // Example: // - // service Foo { - // option (google.api.oauth_scopes) = \ - // "https://www.googleapis.com/auth/cloud-platform"; - // ... - // } + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform"; + // ... + // } // // If there is more than one scope, use a comma-separated string: // // Example: // - // service Foo { - // option (google.api.oauth_scopes) = \ - // "https://www.googleapis.com/auth/cloud-platform," - // "https://www.googleapis.com/auth/monitoring"; - // ... - // } + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform," + // "https://www.googleapis.com/auth/monitoring"; + // ... 
+ // } // // optional string oauth_scopes = 1050; E_OauthScopes = &file_google_api_client_proto_extTypes[2] @@ -148,44 +1195,278 @@ var File_google_api_client_proto protoreflect.FileDescriptor var file_google_api_client_proto_rawDesc = []byte{ 0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, - 0x64, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, - 0x6f, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, - 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x12, 0x30, 0x0a, 0x12, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, + 0x6f, 0x63, 0x73, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, + 0x01, 0x52, 0x10, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x73, + 0x55, 0x72, 0x69, 0x12, 0x48, 0x0a, 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, + 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x93, 0x05, + 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 
0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, + 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a, + 0x12, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x65, 0x6e, + 0x75, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x73, 0x74, 0x4e, + 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6a, + 0x61, 0x76, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x15, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6a, 0x61, + 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x63, 0x70, + 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x70, + 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63, 0x70, 0x70, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68, 0x70, 0x5f, 0x73, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e, 0x6f, 0x64, 0x65, 0x5f, + 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4e, 0x6f, 0x64, 0x65, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, + 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, + 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x64, 0x6f, 0x74, + 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x72, + 0x75, 0x62, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1b, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x72, 0x75, + 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x67, 0x6f, + 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x16, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, + 0x6e, 0x67, 0x73, 0x22, 0xe0, 0x03, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x69, + 0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x69, + 0x73, 0x73, 0x75, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x6e, 0x65, 0x77, 0x49, 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12, 0x2b, 0x0a, 0x11, 0x64, + 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, + 0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x69, 0x5f, + 0x73, 0x68, 0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x67, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x61, 0x70, 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x68, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x4c, 0x61, 0x62, 0x65, + 0x6c, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x69, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x14, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x47, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x54, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64, 0x6f, 0x63, 0x5f, 0x74, + 0x61, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x64, 0x6f, 0x63, 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x49, 0x0a, + 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x6b, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, + 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61, + 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10, 0x6c, 0x69, 0x62, 0x72, + 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x6d, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x22, 0x9a, 0x02, 0x0a, 0x0c, 0x4a, 0x61, 0x76, 0x61, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, + 0x72, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0e, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, + 0x12, 0x5f, 0x0a, 0x13, 0x73, 
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, + 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, 0x44, 0x0a, + 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, + 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x0b, 0x43, 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, + 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, + 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x49, + 0x0a, 0x0b, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a, 0x0e, 0x50, 0x79, 0x74, + 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, + 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, + 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, + 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 
0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, + 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, + 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, + 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, + 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, + 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x8e, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, + 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, + 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, + 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, + 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, + 0x67, 0x1a, 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, + 0x67, 0x12, 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, + 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, + 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, + 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, + 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, + 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, + 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, + 0x47, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, + 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0x79, 0x0a, 0x19, 0x43, 
0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, + 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, + 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, + 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, + 0x03, 0x41, 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, + 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, + 0x57, 0x10, 0x04, 0x2a, 0x67, 0x0a, 0x18, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, + 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, + 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, + 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, + 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41, 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, + 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, + 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42, 0x69, 0x0a, - 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, - 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, - 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, + 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, + 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, + 0x65, 
0x73, 0x42, 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, + 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_client_proto_rawDescOnce sync.Once + file_google_api_client_proto_rawDescData = file_google_api_client_proto_rawDesc +) + +func file_google_api_client_proto_rawDescGZIP() []byte { + file_google_api_client_proto_rawDescOnce.Do(func() { + file_google_api_client_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_client_proto_rawDescData) + }) + return file_google_api_client_proto_rawDescData } +var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 14) var file_google_api_client_proto_goTypes = []interface{}{ - (*descriptorpb.MethodOptions)(nil), // 0: google.protobuf.MethodOptions - (*descriptorpb.ServiceOptions)(nil), // 1: google.protobuf.ServiceOptions + (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization + (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination + (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings + (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings + (*Publishing)(nil), // 4: google.api.Publishing + (*JavaSettings)(nil), // 5: google.api.JavaSettings + (*CppSettings)(nil), // 6: google.api.CppSettings + (*PhpSettings)(nil), // 7: google.api.PhpSettings + (*PythonSettings)(nil), // 8: google.api.PythonSettings + (*NodeSettings)(nil), // 9: google.api.NodeSettings + (*DotnetSettings)(nil), // 10: google.api.DotnetSettings + (*RubySettings)(nil), // 11: google.api.RubySettings + (*GoSettings)(nil), // 12: google.api.GoSettings + (*MethodSettings)(nil), // 13: google.api.MethodSettings + nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry + (*MethodSettings_LongRunning)(nil), // 15: google.api.MethodSettings.LongRunning + (api.LaunchStage)(0), // 16: google.api.LaunchStage + (*durationpb.Duration)(nil), // 17: google.protobuf.Duration + (*descriptorpb.MethodOptions)(nil), // 18: google.protobuf.MethodOptions + (*descriptorpb.ServiceOptions)(nil), // 19: google.protobuf.ServiceOptions } var file_google_api_client_proto_depIdxs = []int32{ - 0, // 0: google.api.method_signature:extendee -> google.protobuf.MethodOptions - 1, // 1: google.api.default_host:extendee -> google.protobuf.ServiceOptions - 1, // 2: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 0, // [0:3] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination + 16, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage + 5, // 2: 
google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings + 6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings + 7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings + 8, // 5: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings + 9, // 6: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings + 10, // 7: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings + 11, // 8: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings + 12, // 9: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings + 13, // 10: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings + 0, // 11: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization + 3, // 12: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings + 14, // 13: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry + 2, // 14: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 15: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 16: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 18: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 19: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 20: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings + 2, // 21: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings + 15, // 22: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning + 17, // 23: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration + 17, // 24: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration + 17, // 25: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration + 18, // 26: google.api.method_signature:extendee -> google.protobuf.MethodOptions + 19, // 27: google.api.default_host:extendee -> google.protobuf.ServiceOptions + 19, // 28: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions + 29, // [29:29] is the sub-list for method output_type + 29, // [29:29] is the sub-list for method input_type + 29, // [29:29] is the sub-list for extension type_name + 26, // [26:29] is the sub-list for extension extendee + 0, // [0:26] is the sub-list for field type_name } func init() { file_google_api_client_proto_init() } @@ -193,18 +1474,178 @@ func file_google_api_client_proto_init() { if File_google_api_client_proto != nil { return } + if !protoimpl.UnsafeEnabled { + file_google_api_client_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CommonLanguageSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ClientLibrarySettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + 
} + file_google_api_client_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Publishing); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JavaSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CppSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PhpSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PythonSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DotnetSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RubySettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GoSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodSettings); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_api_client_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MethodSettings_LongRunning); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_google_api_client_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, + NumEnums: 2, + NumMessages: 14, NumExtensions: 3, NumServices: 0, }, GoTypes: file_google_api_client_proto_goTypes, DependencyIndexes: file_google_api_client_proto_depIdxs, + EnumInfos: file_google_api_client_proto_enumTypes, + MessageInfos: file_google_api_client_proto_msgTypes, ExtensionInfos: file_google_api_client_proto_extTypes, }.Build() File_google_api_client_proto = out.File diff --git 
a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go index 4f34ab73cba11..6f11b7c500f8d 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go @@ -127,19 +127,19 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // Example: // -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/{name=messages/*}" -// }; -// } -// } -// message GetMessageRequest { -// string name = 1; // Mapped to URL path. -// } -// message Message { -// string text = 1; // The resource content. -// } +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/{name=messages/*}" +// }; +// } +// } +// message GetMessageRequest { +// string name = 1; // Mapped to URL path. +// } +// message Message { +// string text = 1; // The resource content. +// } // // This enables an HTTP REST to gRPC mapping as below: // @@ -151,21 +151,21 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // automatically become HTTP query parameters if there is no HTTP request body. // For example: // -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get:"/v1/messages/{message_id}" -// }; -// } -// } -// message GetMessageRequest { -// message SubMessage { -// string subfield = 1; -// } -// string message_id = 1; // Mapped to URL path. -// int64 revision = 2; // Mapped to URL query parameter `revision`. -// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. -// } +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get:"/v1/messages/{message_id}" +// }; +// } +// } +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // Mapped to URL path. +// int64 revision = 2; // Mapped to URL query parameter `revision`. +// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. +// } // // This enables a HTTP JSON to RPC mapping as below: // @@ -186,18 +186,18 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // specifies the mapping. Consider a REST update method on the // message resource collection: // -// service Messaging { -// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "message" -// }; -// } -// } -// message UpdateMessageRequest { -// string message_id = 1; // mapped to the URL -// Message message = 2; // mapped to the body -// } +// service Messaging { +// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "message" +// }; +// } +// } +// message UpdateMessageRequest { +// string message_id = 1; // mapped to the URL +// Message message = 2; // mapped to the body +// } // // The following HTTP JSON to RPC mapping is enabled, where the // representation of the JSON in the request body is determined by @@ -213,19 +213,18 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // request body. 
This enables the following alternative definition of // the update method: // -// service Messaging { -// rpc UpdateMessage(Message) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "*" -// }; -// } -// } -// message Message { -// string message_id = 1; -// string text = 2; -// } -// +// service Messaging { +// rpc UpdateMessage(Message) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "*" +// }; +// } +// } +// message Message { +// string message_id = 1; +// string text = 2; +// } // // The following HTTP JSON to RPC mapping is enabled: // @@ -243,20 +242,20 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // It is possible to define multiple HTTP methods for one RPC by using // the `additional_bindings` option. Example: // -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/messages/{message_id}" -// additional_bindings { -// get: "/v1/users/{user_id}/messages/{message_id}" -// } -// }; -// } -// } -// message GetMessageRequest { -// string message_id = 1; -// string user_id = 2; -// } +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/messages/{message_id}" +// additional_bindings { +// get: "/v1/users/{user_id}/messages/{message_id}" +// } +// }; +// } +// } +// message GetMessageRequest { +// string message_id = 1; +// string user_id = 2; +// } // // This enables the following two alternative HTTP JSON to RPC mappings: // @@ -268,15 +267,15 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // ## Rules for HTTP mapping // -// 1. Leaf request fields (recursive expansion nested messages in the request -// message) are classified into three categories: -// - Fields referred by the path template. They are passed via the URL path. -// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP -// request body. -// - All other fields are passed via the URL query parameters, and the -// parameter name is the field path in the request message. A repeated -// field can be represented as multiple query parameters under the same -// name. +// 1. Leaf request fields (recursive expansion nested messages in the request +// message) are classified into three categories: +// - Fields referred by the path template. They are passed via the URL path. +// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP +// request body. +// - All other fields are passed via the URL query parameters, and the +// parameter name is the field path in the request message. A repeated +// field can be represented as multiple query parameters under the same +// name. // 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query parameter, all fields // are passed via URL path and HTTP request body. // 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP request body, all @@ -284,12 +283,12 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // ### Path template syntax // -// Template = "/" Segments [ Verb ] ; -// Segments = Segment { "/" Segment } ; -// Segment = "*" | "**" | LITERAL | Variable ; -// Variable = "{" FieldPath [ "=" Segments ] "}" ; -// FieldPath = IDENT { "." 
IDENT } ; -// Verb = ":" LITERAL ; +// Template = "/" Segments [ Verb ] ; +// Segments = Segment { "/" Segment } ; +// Segment = "*" | "**" | LITERAL | Variable ; +// Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." IDENT } ; +// Verb = ":" LITERAL ; // // The syntax `*` matches a single URL path segment. The syntax `**` matches // zero or more URL path segments, which must be the last part of the URL path @@ -338,11 +337,11 @@ func (x *Http) GetFullyDecodeReservedExpansion() bool { // // Example: // -// http: -// rules: -// # Selects a gRPC method and applies HttpRule to it. -// - selector: example.v1.Messaging.GetMessage -// get: /v1/messages/{message_id}/{sub.subfield} +// http: +// rules: +// # Selects a gRPC method and applies HttpRule to it. +// - selector: example.v1.Messaging.GetMessage +// get: /v1/messages/{message_id}/{sub.subfield} // // ## Special notes // diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go index 6515668d34f26..13ea54b2940f3 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go @@ -157,45 +157,45 @@ func (ResourceDescriptor_Style) EnumDescriptor() ([]byte, []int) { // // Example: // -// message Topic { -// // Indicates this message defines a resource schema. -// // Declares the resource type in the format of {service}/{kind}. -// // For Kubernetes resources, the format is {api group}/{kind}. -// option (google.api.resource) = { -// type: "pubsub.googleapis.com/Topic" -// pattern: "projects/{project}/topics/{topic}" -// }; -// } +// message Topic { +// // Indicates this message defines a resource schema. +// // Declares the resource type in the format of {service}/{kind}. +// // For Kubernetes resources, the format is {api group}/{kind}. +// option (google.api.resource) = { +// type: "pubsub.googleapis.com/Topic" +// pattern: "projects/{project}/topics/{topic}" +// }; +// } // // The ResourceDescriptor Yaml config will look like: // -// resources: -// - type: "pubsub.googleapis.com/Topic" -// pattern: "projects/{project}/topics/{topic}" +// resources: +// - type: "pubsub.googleapis.com/Topic" +// pattern: "projects/{project}/topics/{topic}" // // Sometimes, resources have multiple patterns, typically because they can // live under multiple parents. 
// // Example: // -// message LogEntry { -// option (google.api.resource) = { -// type: "logging.googleapis.com/LogEntry" -// pattern: "projects/{project}/logs/{log}" -// pattern: "folders/{folder}/logs/{log}" -// pattern: "organizations/{organization}/logs/{log}" -// pattern: "billingAccounts/{billing_account}/logs/{log}" -// }; -// } +// message LogEntry { +// option (google.api.resource) = { +// type: "logging.googleapis.com/LogEntry" +// pattern: "projects/{project}/logs/{log}" +// pattern: "folders/{folder}/logs/{log}" +// pattern: "organizations/{organization}/logs/{log}" +// pattern: "billingAccounts/{billing_account}/logs/{log}" +// }; +// } // // The ResourceDescriptor Yaml config will look like: // -// resources: -// - type: 'logging.googleapis.com/LogEntry' -// pattern: "projects/{project}/logs/{log}" -// pattern: "folders/{folder}/logs/{log}" -// pattern: "organizations/{organization}/logs/{log}" -// pattern: "billingAccounts/{billing_account}/logs/{log}" +// resources: +// - type: 'logging.googleapis.com/LogEntry' +// pattern: "projects/{project}/logs/{log}" +// pattern: "folders/{folder}/logs/{log}" +// pattern: "organizations/{organization}/logs/{log}" +// pattern: "billingAccounts/{billing_account}/logs/{log}" type ResourceDescriptor struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go index dd45cf6e6c1a3..6707a7b1c1d27 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go @@ -44,71 +44,71 @@ const ( // // Message Definition: // -// message Request { -// // The name of the Table -// // Values can be of the following formats: -// // - `projects//tables/` -// // - `projects//instances//tables/
<table>` -// // - `region/<region>/zones/<zone>/tables/
<table>` -// string table_name = 1; -// -// // This value specifies routing for replication. -// // It can be in the following formats: -// // - `profiles/<profile_id>` -// // - a legacy `profile_id` that can be any string -// string app_profile_id = 2; -// } +// message Request { +// // The name of the Table +// // Values can be of the following formats: +// // - `projects/<project>/tables/
<table>` +// // - `projects/<project>/instances/<instance>/tables/
<table>` +// // - `region/<region>/zones/<zone>/tables/
` +// string table_name = 1; +// +// // This value specifies routing for replication. +// // It can be in the following formats: +// // - `profiles/` +// // - a legacy `profile_id` that can be any string +// string app_profile_id = 2; +// } // // Example message: // -// { -// table_name: projects/proj_foo/instances/instance_bar/table/table_baz, -// app_profile_id: profiles/prof_qux -// } +// { +// table_name: projects/proj_foo/instances/instance_bar/table/table_baz, +// app_profile_id: profiles/prof_qux +// } // // The routing header consists of one or multiple key-value pairs. Every key // and value must be percent-encoded, and joined together in the format of // `key1=value1&key2=value2`. // In the examples below I am skipping the percent-encoding for readablity. // -// Example 1 +// # Example 1 // // Extracting a field from the request to put into the routing header // unchanged, with the key equal to the field name. // // annotation: // -// option (google.api.routing) = { -// // Take the `app_profile_id`. -// routing_parameters { -// field: "app_profile_id" -// } -// }; +// option (google.api.routing) = { +// // Take the `app_profile_id`. +// routing_parameters { +// field: "app_profile_id" +// } +// }; // // result: // -// x-goog-request-params: app_profile_id=profiles/prof_qux +// x-goog-request-params: app_profile_id=profiles/prof_qux // -// Example 2 +// # Example 2 // // Extracting a field from the request to put into the routing header // unchanged, with the key different from the field name. // // annotation: // -// option (google.api.routing) = { -// // Take the `app_profile_id`, but name it `routing_id` in the header. -// routing_parameters { -// field: "app_profile_id" -// path_template: "{routing_id=**}" -// } -// }; +// option (google.api.routing) = { +// // Take the `app_profile_id`, but name it `routing_id` in the header. +// routing_parameters { +// field: "app_profile_id" +// path_template: "{routing_id=**}" +// } +// }; // // result: // -// x-goog-request-params: routing_id=profiles/prof_qux +// x-goog-request-params: routing_id=profiles/prof_qux // -// Example 3 +// # Example 3 // // Extracting a field from the request to put into the routing // header, while matching a path template syntax on the field's value. @@ -116,91 +116,91 @@ const ( // NB: it is more useful to send nothing than to send garbage for the purpose // of dynamic routing, since garbage pollutes cache. Thus the matching. // -// Sub-example 3a +// # Sub-example 3a // // The field matches the template. // // annotation: // -// option (google.api.routing) = { -// // Take the `table_name`, if it's well-formed (with project-based -// // syntax). -// routing_parameters { -// field: "table_name" -// path_template: "{table_name=projects/*/instances/*/**}" -// } -// }; +// option (google.api.routing) = { +// // Take the `table_name`, if it's well-formed (with project-based +// // syntax). +// routing_parameters { +// field: "table_name" +// path_template: "{table_name=projects/*/instances/*/**}" +// } +// }; // // result: // -// x-goog-request-params: -// table_name=projects/proj_foo/instances/instance_bar/table/table_baz +// x-goog-request-params: +// table_name=projects/proj_foo/instances/instance_bar/table/table_baz // -// Sub-example 3b +// # Sub-example 3b // // The field does not match the template. // // annotation: // -// option (google.api.routing) = { -// // Take the `table_name`, if it's well-formed (with region-based -// // syntax). 
-// routing_parameters { -// field: "table_name" -// path_template: "{table_name=regions/*/zones/*/**}" -// } -// }; +// option (google.api.routing) = { +// // Take the `table_name`, if it's well-formed (with region-based +// // syntax). +// routing_parameters { +// field: "table_name" +// path_template: "{table_name=regions/*/zones/*/**}" +// } +// }; // // result: // -// +// // -// Sub-example 3c +// # Sub-example 3c // // Multiple alternative conflictingly named path templates are // specified. The one that matches is used to construct the header. // // annotation: // -// option (google.api.routing) = { -// // Take the `table_name`, if it's well-formed, whether -// // using the region- or projects-based syntax. +// option (google.api.routing) = { +// // Take the `table_name`, if it's well-formed, whether +// // using the region- or projects-based syntax. // -// routing_parameters { -// field: "table_name" -// path_template: "{table_name=regions/*/zones/*/**}" -// } -// routing_parameters { -// field: "table_name" -// path_template: "{table_name=projects/*/instances/*/**}" -// } -// }; +// routing_parameters { +// field: "table_name" +// path_template: "{table_name=regions/*/zones/*/**}" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{table_name=projects/*/instances/*/**}" +// } +// }; // // result: // -// x-goog-request-params: -// table_name=projects/proj_foo/instances/instance_bar/table/table_baz +// x-goog-request-params: +// table_name=projects/proj_foo/instances/instance_bar/table/table_baz // -// Example 4 +// # Example 4 // // Extracting a single routing header key-value pair by matching a // template syntax on (a part of) a single request field. // // annotation: // -// option (google.api.routing) = { -// // Take just the project id from the `table_name` field. -// routing_parameters { -// field: "table_name" -// path_template: "{routing_id=projects/*}/**" -// } -// }; +// option (google.api.routing) = { +// // Take just the project id from the `table_name` field. +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*}/**" +// } +// }; // // result: // -// x-goog-request-params: routing_id=projects/proj_foo +// x-goog-request-params: routing_id=projects/proj_foo // -// Example 5 +// # Example 5 // // Extracting a single routing header key-value pair by matching // several conflictingly named path templates on (parts of) a single request @@ -208,87 +208,87 @@ const ( // // annotation: // -// option (google.api.routing) = { -// // If the `table_name` does not have instances information, -// // take just the project id for routing. -// // Otherwise take project + instance. -// -// routing_parameters { -// field: "table_name" -// path_template: "{routing_id=projects/*}/**" -// } -// routing_parameters { -// field: "table_name" -// path_template: "{routing_id=projects/*/instances/*}/**" -// } -// }; +// option (google.api.routing) = { +// // If the `table_name` does not have instances information, +// // take just the project id for routing. +// // Otherwise take project + instance. 
+// +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*}/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*/instances/*}/**" +// } +// }; // // result: // -// x-goog-request-params: -// routing_id=projects/proj_foo/instances/instance_bar +// x-goog-request-params: +// routing_id=projects/proj_foo/instances/instance_bar // -// Example 6 +// # Example 6 // // Extracting multiple routing header key-value pairs by matching // several non-conflicting path templates on (parts of) a single request field. // -// Sub-example 6a +// # Sub-example 6a // // Make the templates strict, so that if the `table_name` does not // have an instance information, nothing is sent. // // annotation: // -// option (google.api.routing) = { -// // The routing code needs two keys instead of one composite -// // but works only for the tables with the "project-instance" name -// // syntax. -// -// routing_parameters { -// field: "table_name" -// path_template: "{project_id=projects/*}/instances/*/**" -// } -// routing_parameters { -// field: "table_name" -// path_template: "projects/*/{instance_id=instances/*}/**" -// } -// }; +// option (google.api.routing) = { +// // The routing code needs two keys instead of one composite +// // but works only for the tables with the "project-instance" name +// // syntax. +// +// routing_parameters { +// field: "table_name" +// path_template: "{project_id=projects/*}/instances/*/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "projects/*/{instance_id=instances/*}/**" +// } +// }; // // result: // -// x-goog-request-params: -// project_id=projects/proj_foo&instance_id=instances/instance_bar +// x-goog-request-params: +// project_id=projects/proj_foo&instance_id=instances/instance_bar // -// Sub-example 6b +// # Sub-example 6b // // Make the templates loose, so that if the `table_name` does not // have an instance information, just the project id part is sent. // // annotation: // -// option (google.api.routing) = { -// // The routing code wants two keys instead of one composite -// // but will work with just the `project_id` for tables without -// // an instance in the `table_name`. -// -// routing_parameters { -// field: "table_name" -// path_template: "{project_id=projects/*}/**" -// } -// routing_parameters { -// field: "table_name" -// path_template: "projects/*/{instance_id=instances/*}/**" -// } -// }; +// option (google.api.routing) = { +// // The routing code wants two keys instead of one composite +// // but will work with just the `project_id` for tables without +// // an instance in the `table_name`. +// +// routing_parameters { +// field: "table_name" +// path_template: "{project_id=projects/*}/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "projects/*/{instance_id=instances/*}/**" +// } +// }; // // result (is the same as 6a for our example message because it has the instance // information): // -// x-goog-request-params: -// project_id=projects/proj_foo&instance_id=instances/instance_bar +// x-goog-request-params: +// project_id=projects/proj_foo&instance_id=instances/instance_bar // -// Example 7 +// # Example 7 // // Extracting multiple routing header key-value pairs by matching // several path templates on multiple request fields. 
@@ -301,26 +301,26 @@ const ( // // annotation: // -// option (google.api.routing) = { -// // The routing needs both `project_id` and `routing_id` -// // (from the `app_profile_id` field) for routing. +// option (google.api.routing) = { +// // The routing needs both `project_id` and `routing_id` +// // (from the `app_profile_id` field) for routing. // -// routing_parameters { -// field: "table_name" -// path_template: "{project_id=projects/*}/**" -// } -// routing_parameters { -// field: "app_profile_id" -// path_template: "{routing_id=**}" -// } -// }; +// routing_parameters { +// field: "table_name" +// path_template: "{project_id=projects/*}/**" +// } +// routing_parameters { +// field: "app_profile_id" +// path_template: "{routing_id=**}" +// } +// }; // // result: // -// x-goog-request-params: -// project_id=projects/proj_foo&routing_id=profiles/prof_qux +// x-goog-request-params: +// project_id=projects/proj_foo&routing_id=profiles/prof_qux // -// Example 8 +// # Example 8 // // Extracting a single routing header key-value pair by matching // several conflictingly named path templates on several request fields. The @@ -328,73 +328,73 @@ const ( // // annotation: // -// option (google.api.routing) = { -// // The `routing_id` can be a project id or a region id depending on -// // the table name format, but only if the `app_profile_id` is not set. -// // If `app_profile_id` is set it should be used instead. -// -// routing_parameters { -// field: "table_name" -// path_template: "{routing_id=projects/*}/**" -// } -// routing_parameters { -// field: "table_name" -// path_template: "{routing_id=regions/*}/**" -// } -// routing_parameters { -// field: "app_profile_id" -// path_template: "{routing_id=**}" -// } -// }; +// option (google.api.routing) = { +// // The `routing_id` can be a project id or a region id depending on +// // the table name format, but only if the `app_profile_id` is not set. +// // If `app_profile_id` is set it should be used instead. +// +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*}/**" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=regions/*}/**" +// } +// routing_parameters { +// field: "app_profile_id" +// path_template: "{routing_id=**}" +// } +// }; // // result: // -// x-goog-request-params: routing_id=profiles/prof_qux +// x-goog-request-params: routing_id=profiles/prof_qux // -// Example 9 +// # Example 9 // // Bringing it all together. // // annotation: // -// option (google.api.routing) = { -// // For routing both `table_location` and a `routing_id` are needed. -// // -// // table_location can be either an instance id or a region+zone id. -// // -// // For `routing_id`, take the value of `app_profile_id` -// // - If it's in the format `profiles/`, send -// // just the `` part. -// // - If it's any other literal, send it as is. -// // If the `app_profile_id` is empty, and the `table_name` starts with -// // the project_id, send that instead. 
-// -// routing_parameters { -// field: "table_name" -// path_template: "projects/*/{table_location=instances/*}/tables/*" -// } -// routing_parameters { -// field: "table_name" -// path_template: "{table_location=regions/*/zones/*}/tables/*" -// } -// routing_parameters { -// field: "table_name" -// path_template: "{routing_id=projects/*}/**" -// } -// routing_parameters { -// field: "app_profile_id" -// path_template: "{routing_id=**}" -// } -// routing_parameters { -// field: "app_profile_id" -// path_template: "profiles/{routing_id=*}" -// } -// }; +// option (google.api.routing) = { +// // For routing both `table_location` and a `routing_id` are needed. +// // +// // table_location can be either an instance id or a region+zone id. +// // +// // For `routing_id`, take the value of `app_profile_id` +// // - If it's in the format `profiles/`, send +// // just the `` part. +// // - If it's any other literal, send it as is. +// // If the `app_profile_id` is empty, and the `table_name` starts with +// // the project_id, send that instead. +// +// routing_parameters { +// field: "table_name" +// path_template: "projects/*/{table_location=instances/*}/tables/*" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{table_location=regions/*/zones/*}/tables/*" +// } +// routing_parameters { +// field: "table_name" +// path_template: "{routing_id=projects/*}/**" +// } +// routing_parameters { +// field: "app_profile_id" +// path_template: "{routing_id=**}" +// } +// routing_parameters { +// field: "app_profile_id" +// path_template: "profiles/{routing_id=*}" +// } +// }; // // result: // -// x-goog-request-params: -// table_location=instances/instance_bar&routing_id=prof_qux +// x-goog-request-params: +// table_location=instances/instance_bar&routing_id=prof_qux type RoutingRule struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go b/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go index 96ec674acf296..d3b909033836b 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/distribution/distribution.pb.go @@ -441,8 +441,8 @@ func (x *Distribution_Exemplar) GetAttachments() []*anypb.Any { // There are `num_finite_buckets + 2` (= N) buckets. Bucket `i` has the // following boundaries: // -// Upper bound (0 <= i < N-1): offset + (width * i). -// Lower bound (1 <= i < N): offset + (width * (i - 1)). +// Upper bound (0 <= i < N-1): offset + (width * i). +// Lower bound (1 <= i < N): offset + (width * (i - 1)). type Distribution_BucketOptions_Linear struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -516,8 +516,8 @@ func (x *Distribution_BucketOptions_Linear) GetOffset() float64 { // There are `num_finite_buckets + 2` (= N) buckets. Bucket `i` has the // following boundaries: // -// Upper bound (0 <= i < N-1): scale * (growth_factor ^ i). -// Lower bound (1 <= i < N): scale * (growth_factor ^ (i - 1)). +// Upper bound (0 <= i < N-1): scale * (growth_factor ^ i). +// Lower bound (1 <= i < N): scale * (growth_factor ^ (i - 1)). type Distribution_BucketOptions_Exponential struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -589,8 +589,8 @@ func (x *Distribution_BucketOptions_Exponential) GetScale() float64 { // There are `size(bounds) + 1` (= N) buckets. 
Bucket `i` has the following // boundaries: // -// Upper bound (0 <= i < N-1): bounds[i] -// Lower bound (1 <= i < N); bounds[i - 1] +// Upper bound (0 <= i < N-1): bounds[i] +// Lower bound (1 <= i < N); bounds[i - 1] // // The `bounds` field must contain at least one element. If `bounds` has // only one element, then there are no finite buckets, and that single diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go index 7ea5ced87e0f2..af72196c80951 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -40,7 +40,6 @@ const ( // payload formats that can't be represented as JSON, such as raw binary or // an HTML page. // -// // This message can be used both in streaming and non-streaming API methods in // the request as well as the response. // @@ -50,32 +49,32 @@ const ( // // Example: // -// message GetResourceRequest { -// // A unique request id. -// string request_id = 1; +// message GetResourceRequest { +// // A unique request id. +// string request_id = 1; // -// // The raw HTTP body is bound to this field. -// google.api.HttpBody http_body = 2; +// // The raw HTTP body is bound to this field. +// google.api.HttpBody http_body = 2; // -// } +// } // -// service ResourceService { -// rpc GetResource(GetResourceRequest) -// returns (google.api.HttpBody); -// rpc UpdateResource(google.api.HttpBody) -// returns (google.protobuf.Empty); +// service ResourceService { +// rpc GetResource(GetResourceRequest) +// returns (google.api.HttpBody); +// rpc UpdateResource(google.api.HttpBody) +// returns (google.protobuf.Empty); // -// } +// } // // Example with streaming methods: // -// service CaldavService { -// rpc GetCalendar(stream google.api.HttpBody) -// returns (stream google.api.HttpBody); -// rpc UpdateCalendar(stream google.api.HttpBody) -// returns (stream google.api.HttpBody); +// service CaldavService { +// rpc GetCalendar(stream google.api.HttpBody) +// returns (stream google.api.HttpBody); +// rpc UpdateCalendar(stream google.api.HttpBody) +// returns (stream google.api.HttpBody); // -// } +// } // // Use of this type only changes how the request and response bodies are // handled, all other features will continue to work unchanged. diff --git a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go index 36bf3ef6de0cc..7107531377325 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.18.1 // source: google/api/launch_stage.proto package api @@ -36,7 +36,7 @@ const ( ) // The launch stage as defined by [Google Cloud Platform -// Launch Stages](http://cloud.google.com/terms/launch-stages). +// Launch Stages](https://cloud.google.com/terms/launch-stages). type LaunchStage int32 const ( @@ -56,7 +56,7 @@ const ( // for widespread use. By Alpha, all significant design issues are resolved // and we are in the process of verifying functionality. Alpha customers // need to apply for access, agree to applicable terms, and have their - // projects allowlisted. Alpha releases don’t have to be feature complete, + // projects allowlisted. 
Alpha releases don't have to be feature complete, // no SLAs are provided, and there are no technical support obligations, but // they will be far enough along that customers can actually use them in // test environments or for limited-use tests -- just like they would in @@ -72,7 +72,7 @@ const ( // fully qualified for production use. LaunchStage_GA LaunchStage = 4 // Deprecated features are scheduled to be shut down and removed. For more - // information, see the “Deprecation Policy” section of our [Terms of + // information, see the "Deprecation Policy" section of our [Terms of // Service](https://cloud.google.com/terms/) // and the [Google Cloud Platform Subject to the Deprecation // Policy](https://cloud.google.com/terms/deprecation) documentation. diff --git a/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go index 1d88eb931f5bb..4f02c47cadb34 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/metric/metric.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.18.1 // source: google/api/metric.proto package metric @@ -175,7 +175,6 @@ func (MetricDescriptor_ValueType) EnumDescriptor() ([]byte, []int) { // Defines a metric type and its schema. Once a metric descriptor is created, // deleting or altering it stops data collection and makes the metric type's // existing data unusable. -// type MetricDescriptor struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -188,9 +187,9 @@ type MetricDescriptor struct { // `custom.googleapis.com` or `external.googleapis.com`. Metric types should // use a natural hierarchical grouping. For example: // - // "custom.googleapis.com/invoice/paid/amount" - // "external.googleapis.com/prometheus/up" - // "appengine.googleapis.com/http/server/response_latencies" + // "custom.googleapis.com/invoice/paid/amount" + // "external.googleapis.com/prometheus/up" + // "appengine.googleapis.com/http/server/response_latencies" Type string `protobuf:"bytes,8,opt,name=type,proto3" json:"type,omitempty"` // The set of labels that can be used to describe a specific // instance of this metric type. For example, the @@ -268,45 +267,45 @@ type MetricDescriptor struct { // // The grammar also includes these connectors: // - // * `/` division or ratio (as an infix operator). For examples, - // `kBy/{email}` or `MiBy/10ms` (although you should almost never - // have `/s` in a metric `unit`; rates should always be computed at - // query time from the underlying cumulative or delta value). - // * `.` multiplication or composition (as an infix operator). For - // examples, `GBy.d` or `k{watt}.h`. + // - `/` division or ratio (as an infix operator). For examples, + // `kBy/{email}` or `MiBy/10ms` (although you should almost never + // have `/s` in a metric `unit`; rates should always be computed at + // query time from the underlying cumulative or delta value). + // - `.` multiplication or composition (as an infix operator). For + // examples, `GBy.d` or `k{watt}.h`. // // The grammar for a unit is as follows: // - // Expression = Component { "." Component } { "/" Component } ; + // Expression = Component { "." 
Component } { "/" Component } ; // - // Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] - // | Annotation - // | "1" - // ; + // Component = ( [ PREFIX ] UNIT | "%" ) [ Annotation ] + // | Annotation + // | "1" + // ; // - // Annotation = "{" NAME "}" ; + // Annotation = "{" NAME "}" ; // // Notes: // - // * `Annotation` is just a comment if it follows a `UNIT`. If the annotation - // is used alone, then the unit is equivalent to `1`. For examples, - // `{request}/s == 1/s`, `By{transmitted}/s == By/s`. - // * `NAME` is a sequence of non-blank printable ASCII characters not - // containing `{` or `}`. - // * `1` represents a unitary [dimensionless - // unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such - // as in `1/s`. It is typically used when none of the basic units are - // appropriate. For example, "new users per day" can be represented as - // `1/d` or `{new-users}/d` (and a metric value `5` would mean "5 new - // users). Alternatively, "thousands of page views per day" would be - // represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric - // value of `5.3` would mean "5300 page views per day"). - // * `%` represents dimensionless value of 1/100, and annotates values giving - // a percentage (so the metric values are typically in the range of 0..100, - // and a metric value `3` means "3 percent"). - // * `10^2.%` indicates a metric contains a ratio, typically in the range - // 0..1, that will be multiplied by 100 and displayed as a percentage - // (so a metric value `0.03` means "3 percent"). + // - `Annotation` is just a comment if it follows a `UNIT`. If the annotation + // is used alone, then the unit is equivalent to `1`. For examples, + // `{request}/s == 1/s`, `By{transmitted}/s == By/s`. + // - `NAME` is a sequence of non-blank printable ASCII characters not + // containing `{` or `}`. + // - `1` represents a unitary [dimensionless + // unit](https://en.wikipedia.org/wiki/Dimensionless_quantity) of 1, such + // as in `1/s`. It is typically used when none of the basic units are + // appropriate. For example, "new users per day" can be represented as + // `1/d` or `{new-users}/d` (and a metric value `5` would mean "5 new + // users). Alternatively, "thousands of page views per day" would be + // represented as `1000/d` or `k1/d` or `k{page_views}/d` (and a metric + // value of `5.3` would mean "5300 page views per day"). + // - `%` represents dimensionless value of 1/100, and annotates values giving + // a percentage (so the metric values are typically in the range of 0..100, + // and a metric value `3` means "3 percent"). + // - `10^2.%` indicates a metric contains a ratio, typically in the range + // 0..1, that will be multiplied by 100 and displayed as a percentage + // (so a metric value `0.03` means "3 percent"). Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"` // A detailed description of the metric, which can be used in documentation. Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` diff --git a/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go index ed1fc17c65c9b..1eb0df9ffd72f 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/monitoredres/monitored_resource.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.18.1 // source: google/api/monitored_resource.proto package monitoredres @@ -47,7 +47,6 @@ const ( // Different APIs can support different monitored resource types. APIs generally // provide a `list` method that returns the monitored resource descriptors used // by the API. -// type MonitoredResourceDescriptor struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -62,6 +61,12 @@ type MonitoredResourceDescriptor struct { Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` // Required. The monitored resource type. For example, the type // `"cloudsql_database"` represents databases in Google Cloud SQL. + // + // For a list of types, see [Monitoring resource + // types](https://cloud.google.com/monitoring/api/resources) + // + // and [Logging resource + // types](https://cloud.google.com/logging/docs/api/v2/resource-list). Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` // Optional. A concise name for the monitored resource type that might be // displayed in user interfaces. It should be a Title Cased Noun Phrase, @@ -161,11 +166,12 @@ func (x *MonitoredResourceDescriptor) GetLaunchStage() api.LaunchStage { // its attributes according to the schema. For example, a particular Compute // Engine VM instance could be represented by the following object, because the // [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] for `"gce_instance"` has labels -// `"instance_id"` and `"zone"`: +// `"project_id"`, `"instance_id"` and `"zone"`: // -// { "type": "gce_instance", -// "labels": { "instance_id": "12345678901234", -// "zone": "us-central1-a" }} +// { "type": "gce_instance", +// "labels": { "project_id": "my-project", +// "instance_id": "12345678901234", +// "zone": "us-central1-a" }} type MonitoredResource struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -174,6 +180,8 @@ type MonitoredResource struct { // Required. The monitored resource type. This field must match // the `type` field of a [MonitoredResourceDescriptor][google.api.MonitoredResourceDescriptor] object. For // example, the type of a Compute Engine VM instance is `gce_instance`. + // Some descriptors include the service name in the type; for example, + // the type of a Datastream stream is `datastream.googleapis.com/Stream`. Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` // Required. Values for all of the labels listed in the associated monitored // resource descriptor. For example, Compute Engine VM instances use the @@ -245,9 +253,9 @@ type MonitoredResourceMetadata struct { // System label values can be only strings, Boolean values, or a list of // strings. For example: // - // { "name": "my-test-instance", - // "security_group": ["a", "b", "c"], - // "spot_instance": false } + // { "name": "my-test-instance", + // "security_group": ["a", "b", "c"], + // "spot_instance": false } SystemLabels *structpb.Struct `protobuf:"bytes,1,opt,name=system_labels,json=systemLabels,proto3" json:"system_labels,omitempty"` // Output only. A map of user-defined metadata labels. 
UserLabels map[string]string `protobuf:"bytes,2,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` diff --git a/vendor/google.golang.org/genproto/googleapis/logging/type/log_severity.pb.go b/vendor/google.golang.org/genproto/googleapis/logging/type/log_severity.pb.go index d8e4d17162751..23396a755a2b6 100644 --- a/vendor/google.golang.org/genproto/googleapis/logging/type/log_severity.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/logging/type/log_severity.pb.go @@ -44,7 +44,7 @@ const ( // filter expression will match log entries with severities `INFO`, `NOTICE`, // and `WARNING`: // -// severity > DEBUG AND severity <= WARNING +// severity > DEBUG AND severity <= WARNING // // If you are writing log entries, you should map other severity encodings to // one of these standard levels. For example, you might map all of Java's FINE, diff --git a/vendor/google.golang.org/genproto/googleapis/longrunning/alias.go b/vendor/google.golang.org/genproto/googleapis/longrunning/alias.go new file mode 100644 index 0000000000000..3addf3b11b1db --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/longrunning/alias.go @@ -0,0 +1,115 @@ +// Copyright 2022 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by aliasgen. DO NOT EDIT. + +// Package longrunning aliases all exported identifiers in package +// "cloud.google.com/go/longrunning/autogen/longrunningpb". +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb. +// Please read https://github.com/googleapis/google-cloud-go/blob/main/migration.md +// for more details. +package longrunning + +import ( + src "cloud.google.com/go/longrunning/autogen/longrunningpb" + grpc "google.golang.org/grpc" +) + +// Deprecated: Please use vars in: cloud.google.com/go/longrunning/autogen/longrunningpb +var ( + E_OperationInfo = src.E_OperationInfo + File_google_longrunning_operations_proto = src.File_google_longrunning_operations_proto +) + +// The request message for +// [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]. +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type CancelOperationRequest = src.CancelOperationRequest + +// The request message for +// [Operations.DeleteOperation][google.longrunning.Operations.DeleteOperation]. +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type DeleteOperationRequest = src.DeleteOperationRequest + +// The request message for +// [Operations.GetOperation][google.longrunning.Operations.GetOperation]. +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type GetOperationRequest = src.GetOperationRequest + +// The request message for +// [Operations.ListOperations][google.longrunning.Operations.ListOperations]. 
+// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type ListOperationsRequest = src.ListOperationsRequest + +// The response message for +// [Operations.ListOperations][google.longrunning.Operations.ListOperations]. +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type ListOperationsResponse = src.ListOperationsResponse + +// This resource represents a long-running operation that is the result of a +// network API call. +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type Operation = src.Operation + +// A message representing the message types used by a long-running operation. +// Example: rpc LongRunningRecognize(LongRunningRecognizeRequest) returns +// (google.longrunning.Operation) { option (google.longrunning.operation_info) +// = { response_type: "LongRunningRecognizeResponse" metadata_type: +// "LongRunningRecognizeMetadata" }; } +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type OperationInfo = src.OperationInfo +type Operation_Error = src.Operation_Error +type Operation_Response = src.Operation_Response + +// OperationsClient is the client API for Operations service. For semantics +// around ctx use and closing/ending streaming RPCs, please refer to +// https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type OperationsClient = src.OperationsClient + +// OperationsServer is the server API for Operations service. +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type OperationsServer = src.OperationsServer + +// UnimplementedOperationsServer can be embedded to have forward compatible +// implementations. +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type UnimplementedOperationsServer = src.UnimplementedOperationsServer + +// The request message for +// [Operations.WaitOperation][google.longrunning.Operations.WaitOperation]. +// +// Deprecated: Please use types in: cloud.google.com/go/longrunning/autogen/longrunningpb +type WaitOperationRequest = src.WaitOperationRequest + +// Deprecated: Please use funcs in: cloud.google.com/go/longrunning/autogen/longrunningpb +func NewOperationsClient(cc grpc.ClientConnInterface) OperationsClient { + return src.NewOperationsClient(cc) +} + +// Deprecated: Please use funcs in: cloud.google.com/go/longrunning/autogen/longrunningpb +func RegisterOperationsServer(s *grpc.Server, srv OperationsServer) { + src.RegisterOperationsServer(s, srv) +} diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go index 1258803152f3e..cc5d52fbcc3a2 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/code/code.pb.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/rpc/code.proto package code @@ -37,7 +37,6 @@ const ( // The canonical error codes for gRPC APIs. 
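The new vendor/google.golang.org/genproto/googleapis/longrunning/alias.go above only re-exports identifiers from cloud.google.com/go/longrunning/autogen/longrunningpb, so existing callers of the deprecated genproto package keep compiling unchanged. A minimal sketch of what that forwarding means in practice (the sameType helper and package name are illustrative, not part of the diff):

```go
// Because alias.go declares `type Operation = longrunningpb.Operation`,
// the deprecated genproto name and the new longrunningpb name are the
// same type; no conversion is needed when mixing the two packages.
package example

import (
	"cloud.google.com/go/longrunning/autogen/longrunningpb"
	"google.golang.org/genproto/googleapis/longrunning"
)

func sameType(op *longrunning.Operation) *longrunningpb.Operation {
	return op // compiles: the alias makes both names identical
}
```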
// -// // Sometimes multiple error codes may apply. Services should return // the most specific error code that applies. For example, prefer // `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply. @@ -45,7 +44,7 @@ const ( type Code int32 const ( - // Not an error; returned on success + // Not an error; returned on success. // // HTTP Mapping: 200 OK Code_OK Code = 0 @@ -79,7 +78,7 @@ const ( // Some requested entity (e.g., file or directory) was not found. // // Note to server developers: if a request is denied for an entire class - // of users, such as gradual feature rollout or undocumented whitelist, + // of users, such as gradual feature rollout or undocumented allowlist, // `NOT_FOUND` may be used. If a request is denied for some users within // a class of users, such as user-based access control, `PERMISSION_DENIED` // must be used. @@ -119,15 +118,16 @@ const ( // // Service implementors can use the following guidelines to decide // between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`: - // (a) Use `UNAVAILABLE` if the client can retry just the failing call. - // (b) Use `ABORTED` if the client should retry at a higher level - // (e.g., when a client-specified test-and-set fails, indicating the - // client should restart a read-modify-write sequence). - // (c) Use `FAILED_PRECONDITION` if the client should not retry until - // the system state has been explicitly fixed. E.g., if an "rmdir" - // fails because the directory is non-empty, `FAILED_PRECONDITION` - // should be returned since the client should not retry unless - // the files are deleted from the directory. + // + // (a) Use `UNAVAILABLE` if the client can retry just the failing call. + // (b) Use `ABORTED` if the client should retry at a higher level. For + // example, when a client-specified test-and-set fails, indicating the + // client should restart a read-modify-write sequence. + // (c) Use `FAILED_PRECONDITION` if the client should not retry until + // the system state has been explicitly fixed. For example, if an "rmdir" + // fails because the directory is non-empty, `FAILED_PRECONDITION` + // should be returned since the client should not retry unless + // the files are deleted from the directory. // // HTTP Mapping: 400 Bad Request Code_FAILED_PRECONDITION Code = 9 diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go index 1c7b93ec160b2..7bd161e48ad7a 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/rpc/error_details.proto package errdetails @@ -36,6 +36,112 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +// Describes the cause of the error with structured details. 
+// +// Example of an error when contacting the "pubsub.googleapis.com" API when it +// is not enabled: +// +// { "reason": "API_DISABLED" +// "domain": "googleapis.com" +// "metadata": { +// "resource": "projects/123", +// "service": "pubsub.googleapis.com" +// } +// } +// +// This response indicates that the pubsub.googleapis.com API is not enabled. +// +// Example of an error that is returned when attempting to create a Spanner +// instance in a region that is out of stock: +// +// { "reason": "STOCKOUT" +// "domain": "spanner.googleapis.com", +// "metadata": { +// "availableRegions": "us-central1,us-east2" +// } +// } +type ErrorInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The reason of the error. This is a constant value that identifies the + // proximate cause of the error. Error reasons are unique within a particular + // domain of errors. This should be at most 63 characters and match a + // regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`, which represents + // UPPER_SNAKE_CASE. + Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"` + // The logical grouping to which the "reason" belongs. The error domain + // is typically the registered service name of the tool or product that + // generates the error. Example: "pubsub.googleapis.com". If the error is + // generated by some common infrastructure, the error domain must be a + // globally unique value that identifies the infrastructure. For Google API + // infrastructure, the error domain is "googleapis.com". + Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"` + // Additional structured details about this error. + // + // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in + // length. When identifying the current value of an exceeded limit, the units + // should be contained in the key, not the value. For example, rather than + // {"instanceLimit": "100/request"}, should be returned as, + // {"instanceLimitPerRequest": "100"}, if the client exceeds the number of + // instances that can be created in a single (batch) request. + Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *ErrorInfo) Reset() { + *x = ErrorInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_google_rpc_error_details_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorInfo) ProtoMessage() {} + +func (x *ErrorInfo) ProtoReflect() protoreflect.Message { + mi := &file_google_rpc_error_details_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorInfo.ProtoReflect.Descriptor instead. 
+func (*ErrorInfo) Descriptor() ([]byte, []int) { + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{0} +} + +func (x *ErrorInfo) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +func (x *ErrorInfo) GetDomain() string { + if x != nil { + return x.Domain + } + return "" +} + +func (x *ErrorInfo) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + // Describes when the clients can retry a failed request. Clients could ignore // the recommendation here or retry when this information is missing from error // responses. @@ -61,7 +167,7 @@ type RetryInfo struct { func (x *RetryInfo) Reset() { *x = RetryInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[0] + mi := &file_google_rpc_error_details_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -74,7 +180,7 @@ func (x *RetryInfo) String() string { func (*RetryInfo) ProtoMessage() {} func (x *RetryInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[0] + mi := &file_google_rpc_error_details_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -87,7 +193,7 @@ func (x *RetryInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use RetryInfo.ProtoReflect.Descriptor instead. func (*RetryInfo) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{0} + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{1} } func (x *RetryInfo) GetRetryDelay() *durationpb.Duration { @@ -112,7 +218,7 @@ type DebugInfo struct { func (x *DebugInfo) Reset() { *x = DebugInfo{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[1] + mi := &file_google_rpc_error_details_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -125,7 +231,7 @@ func (x *DebugInfo) String() string { func (*DebugInfo) ProtoMessage() {} func (x *DebugInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[1] + mi := &file_google_rpc_error_details_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -138,7 +244,7 @@ func (x *DebugInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use DebugInfo.ProtoReflect.Descriptor instead. 
func (*DebugInfo) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{1} + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{2} } func (x *DebugInfo) GetStackEntries() []string { @@ -178,7 +284,7 @@ type QuotaFailure struct { func (x *QuotaFailure) Reset() { *x = QuotaFailure{} if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[2] + mi := &file_google_rpc_error_details_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -191,7 +297,7 @@ func (x *QuotaFailure) String() string { func (*QuotaFailure) ProtoMessage() {} func (x *QuotaFailure) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[2] + mi := &file_google_rpc_error_details_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -204,7 +310,7 @@ func (x *QuotaFailure) ProtoReflect() protoreflect.Message { // Deprecated: Use QuotaFailure.ProtoReflect.Descriptor instead. func (*QuotaFailure) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{2} + return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3} } func (x *QuotaFailure) GetViolations() []*QuotaFailure_Violation { @@ -214,111 +320,6 @@ func (x *QuotaFailure) GetViolations() []*QuotaFailure_Violation { return nil } -// Describes the cause of the error with structured details. -// -// Example of an error when contacting the "pubsub.googleapis.com" API when it -// is not enabled: -// -// { "reason": "API_DISABLED" -// "domain": "googleapis.com" -// "metadata": { -// "resource": "projects/123", -// "service": "pubsub.googleapis.com" -// } -// } -// -// This response indicates that the pubsub.googleapis.com API is not enabled. -// -// Example of an error that is returned when attempting to create a Spanner -// instance in a region that is out of stock: -// -// { "reason": "STOCKOUT" -// "domain": "spanner.googleapis.com", -// "metadata": { -// "availableRegions": "us-central1,us-east2" -// } -// } -type ErrorInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The reason of the error. This is a constant value that identifies the - // proximate cause of the error. Error reasons are unique within a particular - // domain of errors. This should be at most 63 characters and match - // /[A-Z0-9_]+/. - Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"` - // The logical grouping to which the "reason" belongs. The error domain - // is typically the registered service name of the tool or product that - // generates the error. Example: "pubsub.googleapis.com". If the error is - // generated by some common infrastructure, the error domain must be a - // globally unique value that identifies the infrastructure. For Google API - // infrastructure, the error domain is "googleapis.com". - Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"` - // Additional structured details about this error. - // - // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in - // length. When identifying the current value of an exceeded limit, the units - // should be contained in the key, not the value. 
For example, rather than - // {"instanceLimit": "100/request"}, should be returned as, - // {"instanceLimitPerRequest": "100"}, if the client exceeds the number of - // instances that can be created in a single (batch) request. - Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (x *ErrorInfo) Reset() { - *x = ErrorInfo{} - if protoimpl.UnsafeEnabled { - mi := &file_google_rpc_error_details_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ErrorInfo) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ErrorInfo) ProtoMessage() {} - -func (x *ErrorInfo) ProtoReflect() protoreflect.Message { - mi := &file_google_rpc_error_details_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ErrorInfo.ProtoReflect.Descriptor instead. -func (*ErrorInfo) Descriptor() ([]byte, []int) { - return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3} -} - -func (x *ErrorInfo) GetReason() string { - if x != nil { - return x.Reason - } - return "" -} - -func (x *ErrorInfo) GetDomain() string { - if x != nil { - return x.Domain - } - return "" -} - -func (x *ErrorInfo) GetMetadata() map[string]string { - if x != nil { - return x.Metadata - } - return nil -} - // Describes what preconditions have failed. // // For example, if an RPC failed because it required the Terms of Service to be @@ -495,7 +496,8 @@ type ResourceInfo struct { ResourceType string `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"` // The name of the resource being accessed. For example, a shared calendar // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current - // error is [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. + // error is + // [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. ResourceName string `protobuf:"bytes,2,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` // The owner of the resource (optional). 
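This update only moves the ErrorInfo message within error_details.pb.go (its fields and getters are unchanged), so for review context here is a hedged client-side sketch of how such a detail is typically read back out of a gRPC error via google.golang.org/grpc/status; the describeError helper is illustrative and not part of this diff:

```go
package example

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/status"
)

// describeError prints the reason/domain/metadata of any ErrorInfo detail
// attached to an error returned by a gRPC call.
func describeError(err error) {
	s, ok := status.FromError(err)
	if !ok {
		fmt.Println("not a gRPC status error:", err)
		return
	}
	for _, d := range s.Details() {
		if info, ok := d.(*errdetails.ErrorInfo); ok {
			fmt.Printf("reason=%s domain=%s metadata=%v\n",
				info.GetReason(), info.GetDomain(), info.GetMetadata())
		}
	}
}
```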
// For example, "user:" or "project: google.protobuf.Duration - 10, // 1: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation - 11, // 2: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry + 10, // 0: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry + 15, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration + 11, // 2: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation 12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation 13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation 14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link @@ -1089,7 +1125,7 @@ func file_google_rpc_error_details_proto_init() { } if !protoimpl.UnsafeEnabled { file_google_rpc_error_details_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RetryInfo); i { + switch v := v.(*ErrorInfo); i { case 0: return &v.state case 1: @@ -1101,7 +1137,7 @@ func file_google_rpc_error_details_proto_init() { } } file_google_rpc_error_details_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DebugInfo); i { + switch v := v.(*RetryInfo); i { case 0: return &v.state case 1: @@ -1113,7 +1149,7 @@ func file_google_rpc_error_details_proto_init() { } } file_google_rpc_error_details_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QuotaFailure); i { + switch v := v.(*DebugInfo); i { case 0: return &v.state case 1: @@ -1125,7 +1161,7 @@ func file_google_rpc_error_details_proto_init() { } } file_google_rpc_error_details_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ErrorInfo); i { + switch v := v.(*QuotaFailure); i { case 0: return &v.state case 1: @@ -1208,7 +1244,7 @@ func file_google_rpc_error_details_proto_init() { return nil } } - file_google_rpc_error_details_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + file_google_rpc_error_details_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*QuotaFailure_Violation); i { case 0: return &v.state diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go index f34a38e4e95f6..a6b5081888ba4 100644 --- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2022 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.12.2 +// protoc v3.21.9 // source: google/rpc/status.proto package status @@ -48,11 +48,13 @@ type Status struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + // The status code, which should be an enum value of + // [google.rpc.Code][google.rpc.Code]. Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` // A developer-facing error message, which should be in English. 
Any // user-facing error message should be localized and sent in the - // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized + // by the client. Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` // A list of messages that carry the error details. There is a common set of // message types for APIs to use. diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index ae13ddac14e02..02f5dc5318914 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -19,7 +19,7 @@ // Package attributes defines a generic key/value store used in various gRPC // components. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go index 542594f5cc512..29475e31c979e 100644 --- a/vendor/google.golang.org/grpc/backoff.go +++ b/vendor/google.golang.org/grpc/backoff.go @@ -48,7 +48,7 @@ type BackoffConfig struct { // here for more details: // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index f4f9408f38529..09d61dd1b55b4 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -110,6 +110,11 @@ type SubConn interface { UpdateAddresses([]resolver.Address) // Connect starts the connecting for this SubConn. Connect() + // GetOrBuildProducer returns a reference to the existing Producer for this + // ProducerBuilder in this SubConn, or, if one does not currently exist, + // creates a new one and returns it. Returns a close function which must + // be called when the Producer is no longer needed. + GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) } // NewSubConnOptions contains options to create new SubConn. @@ -274,6 +279,14 @@ type PickResult struct { // type, Done may not be called. May be nil if the balancer does not wish // to be notified when the RPC completes. Done func(DoneInfo) + + // Metadata provides a way for LB policies to inject arbitrary per-call + // metadata. Any metadata returned here will be merged with existing + // metadata added by the client application. + // + // LB policies with child policies are responsible for propagating metadata + // injected by their children to the ClientConn, as part of Pick(). + Metatada metadata.MD } // TransientFailureError returns e. It exists for backward compatibility and @@ -371,3 +384,21 @@ type ClientConnState struct { // ErrBadResolverState may be returned by UpdateClientConnState to indicate a // problem with the provided name resolver data. var ErrBadResolverState = errors.New("bad resolver state") + +// A ProducerBuilder is a simple constructor for a Producer. It is used by the +// SubConn to create producers when needed. +type ProducerBuilder interface { + // Build creates a Producer. 
The first parameter is always a + // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the + // associated SubConn), but is declared as interface{} to avoid a + // dependency cycle. Should also return a close function that will be + // called when all references to the Producer have been given up. + Build(grpcClientConnInterface interface{}) (p Producer, close func()) +} + +// A Producer is a type shared among potentially many consumers. It is +// associated with a SubConn, and an implementation will typically contain +// other methods to provide additional functionality, e.g. configuration or +// subscription registration. +type Producer interface { +} diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go index e8dfc828aaacb..3929c26d31e1c 100644 --- a/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -157,8 +157,8 @@ func (b *baseBalancer) mergeErrors() error { // regeneratePicker takes a snapshot of the balancer, and generates a picker // from it. The picker is -// - errPicker if the balancer is in TransientFailure, -// - built by the pickerBuilder with all READY SubConns otherwise. +// - errPicker if the balancer is in TransientFailure, +// - built by the pickerBuilder with all READY SubConns otherwise. func (b *baseBalancer) regeneratePicker() { if b.state == connectivity.TransientFailure { b.picker = NewErrPicker(b.mergeErrors()) diff --git a/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go index a87b6809af386..c33413581091e 100644 --- a/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go +++ b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go @@ -34,10 +34,10 @@ type ConnectivityStateEvaluator struct { // RecordTransition records state change happening in subConn and based on that // it evaluates what aggregated state should be. // -// - If at least one SubConn in Ready, the aggregated state is Ready; -// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; -// - Else if at least one SubConn is Idle, the aggregated state is Idle; -// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure. +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting, the aggregated state is Connecting; +// - Else if at least one SubConn is Idle, the aggregated state is Idle; +// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is Transient Failure. // // Shutdown is not considered. func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State { @@ -55,7 +55,11 @@ func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState conne cse.numIdle += updateVal } } + return cse.CurrentState() +} +// CurrentState returns the current aggregate conn state by evaluating the counters +func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State { // Evaluate. 
if cse.numReady > 0 { return connectivity.Ready diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go index c393d7ffd3b2e..1205aff23f792 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer.pb.go @@ -19,14 +19,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/lb/v1/load_balancer.proto package grpc_lb_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" @@ -42,16 +41,13 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type LoadBalanceRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Types that are assignable to LoadBalanceRequestType: + // // *LoadBalanceRequest_InitialRequest // *LoadBalanceRequest_ClientStats LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"` @@ -340,6 +336,7 @@ type LoadBalanceResponse struct { unknownFields protoimpl.UnknownFields // Types that are assignable to LoadBalanceResponseType: + // // *LoadBalanceResponse_InitialResponse // *LoadBalanceResponse_ServerList // *LoadBalanceResponse_FallbackResponse diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go index 6c3402e36c605..6d698229a342a 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go @@ -19,7 +19,8 @@ // Package grpclb defines a grpclb balancer. // // To install grpclb balancer, import this package as: -// import _ "google.golang.org/grpc/balancer/grpclb" +// +// import _ "google.golang.org/grpc/balancer/grpclb" package grpclb import ( @@ -135,8 +136,8 @@ func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) bal lb := &lbBalancer{ cc: newLBCacheClientConn(cc), - dialTarget: opt.Target.Endpoint, - target: opt.Target.Endpoint, + dialTarget: opt.Target.Endpoint(), + target: opt.Target.Endpoint(), opt: opt, fallbackTimeout: b.fallbackTimeout, doneCh: make(chan struct{}), @@ -229,8 +230,9 @@ type lbBalancer struct { // regeneratePicker takes a snapshot of the balancer, and generates a picker from // it. The picker -// - always returns ErrTransientFailure if the balancer is in TransientFailure, -// - does two layer roundrobin pick otherwise. +// - always returns ErrTransientFailure if the balancer is in TransientFailure, +// - does two layer roundrobin pick otherwise. +// // Caller must hold lb.mu. func (lb *lbBalancer) regeneratePicker(resetDrop bool) { if lb.state == connectivity.TransientFailure { @@ -290,14 +292,14 @@ func (lb *lbBalancer) regeneratePicker(resetDrop bool) { // fallback and grpclb). lb.scState contains states for all SubConns, including // those in cache (SubConns are cached for 10 seconds after remove). 
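For the conn_state_evaluator.go change above: RecordTransition now delegates to the new exported CurrentState accessor, which re-evaluates the same counters. A rough usage sketch with illustrative transitions (aggregateExample is not part of the diff):

```go
package example

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
)

func aggregateExample() {
	var cse balancer.ConnectivityStateEvaluator

	// Register a new SubConn: Shutdown is not counted, so this records one Idle.
	cse.RecordTransition(connectivity.Shutdown, connectivity.Idle)

	// Idle -> Connecting: the aggregate state becomes Connecting.
	fmt.Println(cse.RecordTransition(connectivity.Idle, connectivity.Connecting))

	// Connecting -> Ready: the aggregate state becomes Ready.
	fmt.Println(cse.RecordTransition(connectivity.Connecting, connectivity.Ready))

	// CurrentState re-evaluates the counters without recording a new transition.
	fmt.Println(cse.CurrentState())
}
```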
// -// The aggregated state is: -// - If at least one SubConn in Ready, the aggregated state is Ready; -// - Else if at least one SubConn in Connecting or IDLE, the aggregated state is Connecting; -// - It's OK to consider IDLE as Connecting. SubConns never stay in IDLE, -// they start to connect immediately. But there's a race between the overall -// state is reported, and when the new SubConn state arrives. And SubConns -// never go back to IDLE. -// - Else the aggregated state is TransientFailure. +// The aggregated state is: +// - If at least one SubConn in Ready, the aggregated state is Ready; +// - Else if at least one SubConn in Connecting or IDLE, the aggregated state is Connecting; +// - It's OK to consider IDLE as Connecting. SubConns never stay in IDLE, +// they start to connect immediately. But there's a race between the overall +// state is reported, and when the new SubConn state arrives. And SubConns +// never go back to IDLE. +// - Else the aggregated state is TransientFailure. func (lb *lbBalancer) aggregateSubConnStates() connectivity.State { var numConnecting uint64 diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go index dab1959418e12..e56006d7131a8 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go @@ -332,7 +332,7 @@ func (ccw *remoteBalancerCCWrapper) callRemoteBalancer(ctx context.Context) (bac lbClient := &loadBalancerClient{cc: ccw.cc} stream, err := lbClient.BalanceLoad(ctx, grpc.WaitForReady(true)) if err != nil { - return true, fmt.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err) + return true, fmt.Errorf("grpclb: failed to perform RPC to the remote balancer: %v", err) } ccw.lb.mu.Lock() ccw.lb.remoteBalancerConnected = true diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index b1c23eaae0db2..0359956d36fa3 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -19,17 +19,20 @@ package grpc import ( + "context" "fmt" "strings" "sync" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/status" ) // ccBalancerWrapper sits between the ClientConn and the Balancer. @@ -305,7 +308,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err } - acbw := &acBalancerWrapper{ac: ac} + acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)} acbw.ac.mu.Lock() ac.acbw = acbw acbw.ac.mu.Unlock() @@ -359,8 +362,9 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface. 
type acBalancerWrapper struct { - mu sync.Mutex - ac *addrConn + mu sync.Mutex + ac *addrConn + producers map[balancer.ProducerBuilder]*refCountedProducer } func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { @@ -414,3 +418,64 @@ func (acbw *acBalancerWrapper) getAddrConn() *addrConn { defer acbw.mu.Unlock() return acbw.ac } + +var errSubConnNotReady = status.Error(codes.Unavailable, "SubConn not currently connected") + +// NewStream begins a streaming RPC on the addrConn. If the addrConn is not +// ready, returns errSubConnNotReady. +func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + transport := acbw.ac.getReadyTransport() + if transport == nil { + return nil, errSubConnNotReady + } + return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) +} + +// Invoke performs a unary RPC. If the addrConn is not ready, returns +// errSubConnNotReady. +func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error { + cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) + if err != nil { + return err + } + if err := cs.SendMsg(args); err != nil { + return err + } + return cs.RecvMsg(reply) +} + +type refCountedProducer struct { + producer balancer.Producer + refs int // number of current refs to the producer + close func() // underlying producer's close function +} + +func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) { + acbw.mu.Lock() + defer acbw.mu.Unlock() + + // Look up existing producer from this builder. + pData := acbw.producers[pb] + if pData == nil { + // Not found; create a new one and add it to the producers map. + p, close := pb.Build(acbw) + pData = &refCountedProducer{producer: p, close: close} + acbw.producers[pb] = pData + } + // Account for this new reference. + pData.refs++ + + // Return a cleanup function wrapped in a OnceFunc to remove this reference + // and delete the refCountedProducer from the map if the total reference + // count goes to zero. + unref := func() { + acbw.mu.Lock() + pData.refs-- + if pData.refs == 0 { + defer pData.close() // Run outside the acbw mutex + delete(acbw.producers, pb) + } + acbw.mu.Unlock() + } + return pData.producer, grpcsync.OnceFunc(unref) +} diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index ed75290cdf347..66d141fce707b 100644 --- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,14 +18,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/binlog/v1/binarylog.proto package grpc_binarylog_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" @@ -41,10 +40,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
-const _ = proto.ProtoPackageIsVersion4 - // Enumerates the type of event // Note the terminology is different from the RPC semantics // definition, but the same meaning is expressed here. @@ -261,6 +256,7 @@ type GrpcLogEntry struct { // according to the type of the log entry. // // Types that are assignable to Payload: + // // *GrpcLogEntry_ClientHeader // *GrpcLogEntry_ServerHeader // *GrpcLogEntry_Message @@ -694,12 +690,12 @@ func (x *Message) GetData() []byte { // Header keys added by gRPC are omitted. To be more specific, // implementations will not log the following entries, and this is // not to be treated as a truncation: -// - entries handled by grpc that are not user visible, such as those -// that begin with 'grpc-' (with exception of grpc-trace-bin) -// or keys like 'lb-token' -// - transport specific entries, including but not limited to: -// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc -// - entries added for call credentials +// - entries handled by grpc that are not user visible, such as those +// that begin with 'grpc-' (with exception of grpc-trace-bin) +// or keys like 'lb-token' +// - transport specific entries, including but not limited to: +// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc +// - entries added for call credentials // // Implementations must always log grpc-trace-bin if it is present. // Practically speaking it will only be visible on server side because diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go index a220c47c59a50..32b7fa5794e18 100644 --- a/vendor/google.golang.org/grpc/channelz/channelz.go +++ b/vendor/google.golang.org/grpc/channelz/channelz.go @@ -23,7 +23,7 @@ // https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by // the `internal/channelz` package. // -// Experimental +// # Experimental // // Notice: All APIs in this package are experimental and may be removed in a // later release. diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 779b03bca1c31..d607d4e9e243e 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -256,7 +256,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * if err != nil { return nil, err } - cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint, cc.target, cc.dopts) + cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint(), cc.target, cc.dopts) if err != nil { return nil, err } @@ -503,7 +503,7 @@ type ClientConn struct { // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -522,7 +522,7 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec // GetState returns the connectivity.State of ClientConn. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. @@ -534,7 +534,7 @@ func (cc *ClientConn) GetState() connectivity.State { // the channel is idle. Does not wait for the connection attempts to begin // before returning. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. 
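The ProducerBuilder/Producer interfaces added in balancer/balancer.go and the GetOrBuildProducer implementation on acBalancerWrapper (a few hunks above) are new API surface in this bump. A hypothetical, minimal builder, assuming the names pingProducer/pingProducerBuilder, just to show the expected shape:

```go
package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/balancer"
)

// pingProducer keeps the per-SubConn connection so callers can issue RPCs on it.
// balancer.Producer is an empty interface, so any type satisfies it.
type pingProducer struct {
	cc grpc.ClientConnInterface
}

type pingProducerBuilder struct{}

// Build is invoked at most once per (SubConn, builder) pair by GetOrBuildProducer;
// the interface{} argument is the SubConn acting as a grpc.ClientConnInterface.
func (pingProducerBuilder) Build(cci interface{}) (balancer.Producer, func()) {
	p := &pingProducer{cc: cci.(grpc.ClientConnInterface)}
	return p, func() {
		// Called once the last reference returned by GetOrBuildProducer is released.
	}
}

// Inside an LB policy, given a balancer.SubConn sc:
//
//	p, closeFn := sc.GetOrBuildProducer(pingProducerBuilder{})
//	defer closeFn()
//	_ = p.(*pingProducer) // issue unary calls or streams via its cc field
```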
@@ -761,7 +761,7 @@ func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric { // Target returns the target string of the ClientConn. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -788,10 +788,16 @@ func (cc *ClientConn) incrCallsFailed() { func (ac *addrConn) connect() error { ac.mu.Lock() if ac.state == connectivity.Shutdown { + if logger.V(2) { + logger.Infof("connect called on shutdown addrConn; ignoring.") + } ac.mu.Unlock() return errConnClosing } if ac.state != connectivity.Idle { + if logger.V(2) { + logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state) + } ac.mu.Unlock() return nil } @@ -831,9 +837,9 @@ func equalAddresses(a, b []resolver.Address) bool { // // If ac is Ready, it checks whether current connected address of ac is in the // new addrs list. -// - If true, it updates ac.addrs and returns true. The ac will keep using -// the existing connection. -// - If false, it does nothing and returns false. +// - If true, it updates ac.addrs and returns true. The ac will keep using +// the existing connection. +// - If false, it does nothing and returns false. func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { ac.mu.Lock() defer ac.mu.Unlock() @@ -928,7 +934,7 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { return cc.sc.healthCheckConfig } -func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) { +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ Ctx: ctx, FullMethodName: method, @@ -998,7 +1004,7 @@ func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { // However, if a previously unavailable network becomes available, this may be // used to trigger an immediate reconnect. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -1228,111 +1234,79 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T // address was not successfully connected, or updates ac appropriately with the // new transport. func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { - // TODO: Delete prefaceReceived and move the logic to wait for it into the - // transport. - prefaceReceived := grpcsync.NewEvent() - connClosed := grpcsync.NewEvent() - addr.ServerName = ac.cc.getServerName(addr) hctx, hcancel := context.WithCancel(ac.ctx) - hcStarted := false // protected by ac.mu - onClose := func() { + onClose := func(r transport.GoAwayReason) { ac.mu.Lock() defer ac.mu.Unlock() - defer connClosed.Fire() - defer hcancel() - if !hcStarted || hctx.Err() != nil { - // We didn't start the health check or set the state to READY, so - // no need to do anything else here. - // - // OR, we have already cancelled the health check context, meaning - // we have already called onClose once for this transport. In this - // case it would be dangerous to clear the transport and update the - // state, since there may be a new transport in this addrConn. + // adjust params based on GoAwayReason + ac.adjustParams(r) + if ac.state == connectivity.Shutdown { + // Already shut down. 
tearDown() already cleared the transport and + // canceled hctx via ac.ctx, and we expected this connection to be + // closed, so do nothing here. + return + } + hcancel() + if ac.transport == nil { + // We're still connecting to this address, which could error. Do + // not update the connectivity state or resolve; these will happen + // at the end of the tryAllAddrs connection loop in the event of an + // error. return } ac.transport = nil - // Refresh the name resolver + // Refresh the name resolver on any connection loss. ac.cc.resolveNow(resolver.ResolveNowOptions{}) - if ac.state != connectivity.Shutdown { - ac.updateConnectivityState(connectivity.Idle, nil) - } - } - - onGoAway := func(r transport.GoAwayReason) { - ac.mu.Lock() - ac.adjustParams(r) - ac.mu.Unlock() - onClose() + // Always go idle and wait for the LB policy to initiate a new + // connection attempt. + ac.updateConnectivityState(connectivity.Idle, nil) } connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) defer cancel() copts.ChannelzParentID = ac.channelzID - newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, func() { prefaceReceived.Fire() }, onGoAway, onClose) + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onClose) if err != nil { + if logger.V(2) { + logger.Infof("Creating new client transport to %q: %v", addr, err) + } // newTr is either nil, or closed. hcancel() channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err) return err } - select { - case <-connectCtx.Done(): - // We didn't get the preface in time. + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.state == connectivity.Shutdown { + // This can happen if the subConn was removed while in `Connecting` + // state. tearDown() would have set the state to `Shutdown`, but + // would not have closed the transport since ac.transport would not + // have been set at that point. + // + // We run this in a goroutine because newTr.Close() calls onClose() + // inline, which requires locking ac.mu. + // // The error we pass to Close() is immaterial since there are no open // streams at this point, so no trailers with error details will be sent // out. We just need to pass a non-nil error. - newTr.Close(transport.ErrConnClosing) - if connectCtx.Err() == context.DeadlineExceeded { - err := errors.New("failed to receive server preface within timeout") - channelz.Warningf(logger, ac.channelzID, "grpc: addrConn.createTransport failed to connect to %s: %v", addr, err) - return err - } + go newTr.Close(transport.ErrConnClosing) return nil - case <-prefaceReceived.Done(): - // We got the preface - huzzah! things are good. - ac.mu.Lock() - defer ac.mu.Unlock() - if connClosed.HasFired() { - // onClose called first; go idle but do nothing else. - if ac.state != connectivity.Shutdown { - ac.updateConnectivityState(connectivity.Idle, nil) - } - return nil - } - if ac.state == connectivity.Shutdown { - // This can happen if the subConn was removed while in `Connecting` - // state. tearDown() would have set the state to `Shutdown`, but - // would not have closed the transport since ac.transport would not - // been set at that point. - // - // We run this in a goroutine because newTr.Close() calls onClose() - // inline, which requires locking ac.mu. - // - // The error we pass to Close() is immaterial since there are no open - // streams at this point, so no trailers with error details will be sent - // out. We just need to pass a non-nil error. 
- go newTr.Close(transport.ErrConnClosing) - return nil - } - ac.curAddr = addr - ac.transport = newTr - hcStarted = true - ac.startHealthCheck(hctx) // Will set state to READY if appropriate. + } + if hctx.Err() != nil { + // onClose was already called for this connection, but the connection + // was successfully established first. Consider it a success and set + // the new state to Idle. + ac.updateConnectivityState(connectivity.Idle, nil) return nil - case <-connClosed.Done(): - // The transport has already closed. If we received the preface, too, - // this is not an error. - select { - case <-prefaceReceived.Done(): - return nil - default: - return errors.New("connection closed before server preface received") - } } + ac.curAddr = addr + ac.transport = newTr + ac.startHealthCheck(hctx) // Will set state to READY if appropriate. + return nil } // startHealthCheck starts the health checking stream (RPC) to watch the health @@ -1402,7 +1376,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { if status.Code(err) == codes.Unimplemented { channelz.Error(logger, ac.channelzID, "Subchannel health check is unimplemented at server side, thus health check is disabled") } else { - channelz.Errorf(logger, ac.channelzID, "HealthCheckFunc exits with unexpected error %v", err) + channelz.Errorf(logger, ac.channelzID, "Health checking failed: %v", err) } } }() @@ -1583,7 +1557,7 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) } else { channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) - rb = cc.getResolver(parsedTarget.Scheme) + rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget return rb, nil @@ -1604,39 +1578,26 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { return nil, err } channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) - rb = cc.getResolver(parsedTarget.Scheme) + rb = cc.getResolver(parsedTarget.URL.Scheme) if rb == nil { - return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.Scheme) + return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) } cc.parsedTarget = parsedTarget return rb, nil } // parseTarget uses RFC 3986 semantics to parse the given target into a -// resolver.Target struct containing scheme, authority and endpoint. Query +// resolver.Target struct containing scheme, authority and url. Query // params are stripped from the endpoint. func parseTarget(target string) (resolver.Target, error) { u, err := url.Parse(target) if err != nil { return resolver.Target{}, err } - // For targets of the form "[scheme]://[authority]/endpoint, the endpoint - // value returned from url.Parse() contains a leading "/". Although this is - // in accordance with RFC 3986, we do not want to break existing resolver - // implementations which expect the endpoint without the leading "/". So, we - // end up stripping the leading "/" here. But this will result in an - // incorrect parsing for something like "unix:///path/to/socket". Since we - // own the "unix" resolver, we can workaround in the unix resolver by using - // the `URL` field instead of the `Endpoint` field. 
- endpoint := u.Path - if endpoint == "" { - endpoint = u.Opaque - } - endpoint = strings.TrimPrefix(endpoint, "/") + return resolver.Target{ Scheme: u.Scheme, Authority: u.Host, - Endpoint: endpoint, URL: *u, }, nil } diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go index 703b48da753b1..1a40e17e8d3f3 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/altscontext.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/gcp/altscontext.proto package grpc_gcp import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type AltsContext struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go index 40570e9bf2de4..50eefa53830ed 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/gcp/handshaker.proto package grpc_gcp import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type HandshakeProtocol int32 const ( @@ -216,6 +211,7 @@ type Identity struct { unknownFields protoimpl.UnknownFields // Types that are assignable to IdentityOneof: + // // *Identity_ServiceAccount // *Identity_Hostname IdentityOneof isIdentity_IdentityOneof `protobuf_oneof:"identity_oneof"` @@ -664,6 +660,7 @@ type HandshakerReq struct { unknownFields protoimpl.UnknownFields // Types that are assignable to ReqOneof: + // // *HandshakerReq_ClientStart // *HandshakerReq_ServerStart // *HandshakerReq_Next diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go index 4fc3c79d6a399..b07412f18585f 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/transport_security_common.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/gcp/transport_security_common.proto package grpc_gcp import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - // The security level of the created channel. The list is sorted in increasing // level of security. This order must always be maintained. type SecurityLevel int32 diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go index 96ff1877e7549..5feac3aa0e415 100644 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -36,16 +36,16 @@ import ( // PerRPCCredentials defines the common interface for the credentials which need to // attach security information to every RPC (e.g., oauth2). type PerRPCCredentials interface { - // GetRequestMetadata gets the current request metadata, refreshing - // tokens if required. This should be called by the transport layer on - // each request, and the data should be populated in headers or other - // context. If a status code is returned, it will be used as the status - // for the RPC. uri is the URI of the entry point for the request. - // When supported by the underlying implementation, ctx can be used for - // timeout and cancellation. Additionally, RequestInfo data will be - // available via ctx to this call. - // TODO(zhaoq): Define the set of the qualified keys instead of leaving - // it as an arbitrary string. + // GetRequestMetadata gets the current request metadata, refreshing tokens + // if required. This should be called by the transport layer on each + // request, and the data should be populated in headers or other + // context. If a status code is returned, it will be used as the status for + // the RPC (restricted to an allowable set of codes as defined by gRFC + // A54). uri is the URI of the entry point for the request. When supported + // by the underlying implementation, ctx can be used for timeout and + // cancellation. Additionally, RequestInfo data will be available via ctx + // to this call. TODO(zhaoq): Define the set of the qualified keys instead + // of leaving it as an arbitrary string. GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) // RequireTransportSecurity indicates whether the credentials requires // transport security. diff --git a/vendor/google.golang.org/grpc/credentials/google/xds.go b/vendor/google.golang.org/grpc/credentials/google/xds.go index e32edc0421c3e..2c5c8b9eee136 100644 --- a/vendor/google.golang.org/grpc/credentials/google/xds.go +++ b/vendor/google.golang.org/grpc/credentials/google/xds.go @@ -40,6 +40,7 @@ const cfeClusterAuthorityName = "traffic-director-c2p.xds.googleapis.com" // "xdstp://traffic-director-c2p.xds.googleapis.com/envoy.config.cluster.v3.Cluster/google_cfe_", // use TLS // - otherwise, use ALTS +// // - else, do TLS // // On the server, ServerHandshake always does TLS. 
diff --git a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go index c748fd21ce2b8..d475cbc0894c0 100644 --- a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go +++ b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go @@ -22,8 +22,8 @@ package oauth import ( "context" "fmt" - "io/ioutil" "net/url" + "os" "sync" "golang.org/x/oauth2" @@ -73,7 +73,7 @@ type jwtAccess struct { // NewJWTAccessFromFile creates PerRPCCredentials from the given keyFile. func NewJWTAccessFromFile(keyFile string) (credentials.PerRPCCredentials, error) { - jsonKey, err := ioutil.ReadFile(keyFile) + jsonKey, err := os.ReadFile(keyFile) if err != nil { return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) } @@ -121,6 +121,8 @@ type oauthAccess struct { } // NewOauthAccess constructs the PerRPCCredentials using a given token. +// +// Deprecated: use oauth.TokenSource instead. func NewOauthAccess(token *oauth2.Token) credentials.PerRPCCredentials { return oauthAccess{token: *token} } @@ -190,7 +192,7 @@ func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.PerR // NewServiceAccountFromFile constructs the PerRPCCredentials using the JSON key file // of a Google Developers service account. func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.PerRPCCredentials, error) { - jsonKey, err := ioutil.ReadFile(keyFile) + jsonKey, err := os.ReadFile(keyFile) if err != nil { return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err) } diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go index 784822d0560ad..877b7cd21af72 100644 --- a/vendor/google.golang.org/grpc/credentials/tls.go +++ b/vendor/google.golang.org/grpc/credentials/tls.go @@ -23,9 +23,9 @@ import ( "crypto/tls" "crypto/x509" "fmt" - "io/ioutil" "net" "net/url" + "os" credinternal "google.golang.org/grpc/internal/credentials" ) @@ -166,7 +166,7 @@ func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) Transpor // it will override the virtual host name of authority (e.g. :authority header // field) in requests. func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { - b, err := ioutil.ReadFile(certFile) + b, err := os.ReadFile(certFile) if err != nil { return nil, err } @@ -195,7 +195,7 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error // TLSChannelzSecurityValue defines the struct that TLS protocol should return // from GetSecurityValue(), containing security info like cipher and certificate used. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 9372dc322e803..4866da101c607 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -44,6 +44,7 @@ func init() { extraDialOptions = nil } internal.WithBinaryLogger = withBinaryLogger + internal.JoinDialOptions = newJoinDialOption } // dialOptions configure a Dial call. 
dialOptions are set by the DialOption @@ -111,13 +112,28 @@ func newFuncDialOption(f func(*dialOptions)) *funcDialOption { } } +type joinDialOption struct { + opts []DialOption +} + +func (jdo *joinDialOption) apply(do *dialOptions) { + for _, opt := range jdo.opts { + opt.apply(do) + } +} + +func newJoinDialOption(opts ...DialOption) DialOption { + return &joinDialOption{opts: opts} +} + // WithWriteBufferSize determines how much data can be batched before doing a // write on the wire. The corresponding memory allocation for this buffer will // be twice the size to keep syscalls low. The default value for this buffer is // 32KB. // -// Zero will disable the write buffer such that each write will be on underlying -// connection. Note: A Send call may not directly translate to a write. +// Zero or negative values will disable the write buffer such that each write +// will be on underlying connection. Note: A Send call may not directly +// translate to a write. func WithWriteBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.WriteBufferSize = s @@ -127,8 +143,9 @@ func WithWriteBufferSize(s int) DialOption { // WithReadBufferSize lets you set the size of read buffer, this determines how // much data can be read at most for each read syscall. // -// The default value for this buffer is 32KB. Zero will disable read buffer for -// a connection so data framer can access the underlying conn directly. +// The default value for this buffer is 32KB. Zero or negative values will +// disable read buffer for a connection so data framer can access the +// underlying conn directly. func WithReadBufferSize(s int) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.ReadBufferSize = s diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 18e530fc90242..07a5861352a6f 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -19,7 +19,7 @@ // Package encoding defines the interface for the compressor and codec, and // functions to register and retrieve compressors and codecs. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. @@ -28,6 +28,8 @@ package encoding import ( "io" "strings" + + "google.golang.org/grpc/internal/grpcutil" ) // Identity specifies the optional encoding for uncompressed streams. @@ -73,6 +75,9 @@ var registeredCompressor = make(map[string]Compressor) // registered with the same name, the one registered last will take effect. func RegisterCompressor(c Compressor) { registeredCompressor[c.Name()] = c + if !grpcutil.IsCompressorNameRegistered(c.Name()) { + grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name()) + } } // GetCompressor returns Compressor for the given compressor name. diff --git a/vendor/google.golang.org/grpc/encoding/gzip/gzip.go b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go index ce2f15ed288f8..a3bb173c24ac7 100644 --- a/vendor/google.golang.org/grpc/encoding/gzip/gzip.go +++ b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go @@ -19,7 +19,7 @@ // Package gzip implements and registers the gzip compressor // during the initialization. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. 
@@ -30,7 +30,6 @@ import ( "encoding/binary" "fmt" "io" - "io/ioutil" "sync" "google.golang.org/grpc/encoding" @@ -42,7 +41,7 @@ const Name = "gzip" func init() { c := &compressor{} c.poolCompressor.New = func() interface{} { - return &writer{Writer: gzip.NewWriter(ioutil.Discard), pool: &c.poolCompressor} + return &writer{Writer: gzip.NewWriter(io.Discard), pool: &c.poolCompressor} } encoding.RegisterCompressor(c) } @@ -63,7 +62,7 @@ func SetLevel(level int) error { } c := encoding.GetCompressor(Name).(*compressor) c.poolCompressor.New = func() interface{} { - w, err := gzip.NewWriterLevel(ioutil.Discard, level) + w, err := gzip.NewWriterLevel(io.Discard, level) if err != nil { panic(err) } diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index 7c1f664090344..5de66e40d365b 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -22,7 +22,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "log" "os" "strconv" @@ -140,9 +139,9 @@ func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) // newLoggerV2 creates a loggerV2 to be used as default logger. // All logs are written to stderr. func newLoggerV2() LoggerV2 { - errorW := ioutil.Discard - warningW := ioutil.Discard - infoW := ioutil.Discard + errorW := io.Discard + warningW := io.Discard + infoW := io.Discard logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") switch logLevel { @@ -242,7 +241,7 @@ func (g *loggerT) V(l int) bool { // DepthLoggerV2, the below functions will be called with the appropriate stack // depth set for trivial functions the logger may ignore. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index a66024d23e301..8e29a62f164f5 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,14 +17,13 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.25.0 +// protoc-gen-go v1.28.1 // protoc v3.14.0 // source: grpc/health/v1/health.proto package grpc_health_v1 import ( - proto "github.com/golang/protobuf/proto" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" @@ -38,10 +37,6 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - type HealthCheckResponse_ServingStatus int32 const ( diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go index c5579e65065f6..f9e80e27ab688 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go @@ -30,15 +30,15 @@ import ( // to build a new logger and assign it to binarylog.Logger. // // Example filter config strings: -// - "" Nothing will be logged -// - "*" All headers and messages will be fully logged. -// - "*{h}" Only headers will be logged. -// - "*{m:256}" Only the first 256 bytes of each message will be logged. 
-// - "Foo/*" Logs every method in service Foo -// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar -// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method -// /Foo/Bar, logs all headers and messages in every other method in service -// Foo. +// - "" Nothing will be logged +// - "*" All headers and messages will be fully logged. +// - "*{h}" Only headers will be logged. +// - "*{m:256}" Only the first 256 bytes of each message will be logged. +// - "Foo/*" Logs every method in service Foo +// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar +// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method +// /Foo/Bar, logs all headers and messages in every other method in service +// Foo. // // If two configs exist for one certain method or service, the one specified // later overrides the previous config. diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 179f4a26d135b..d71e441778f4e 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -26,7 +26,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes" - pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" ) @@ -79,7 +79,7 @@ func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { // Build is an internal only method for building the proto message out of the // input event. It's made public to enable other library to reuse as much logic // in TruncatingMethodLogger as possible. -func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry { +func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry { m := c.toProto() timestamp, _ := ptypes.TimestampProto(time.Now()) m.Timestamp = timestamp @@ -87,11 +87,11 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *pb.GrpcLogEntry { m.SequenceIdWithinCall = ml.idWithinCallGen.next() switch pay := m.Payload.(type) { - case *pb.GrpcLogEntry_ClientHeader: + case *binlogpb.GrpcLogEntry_ClientHeader: m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata()) - case *pb.GrpcLogEntry_ServerHeader: + case *binlogpb.GrpcLogEntry_ServerHeader: m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata()) - case *pb.GrpcLogEntry_Message: + case *binlogpb.GrpcLogEntry_Message: m.PayloadTruncated = ml.truncateMessage(pay.Message) } return m @@ -102,7 +102,7 @@ func (ml *TruncatingMethodLogger) Log(c LogEntryConfig) { ml.sink.Write(ml.Build(c)) } -func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) { if ml.headerMaxLen == maxUInt { return false } @@ -121,7 +121,7 @@ func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated // but not counted towards the size limit. 
continue } - currentEntryLen := uint64(len(entry.Value)) + currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue())) if currentEntryLen > bytesLimit { break } @@ -132,7 +132,7 @@ func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated return truncated } -func (ml *TruncatingMethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) { +func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) { if ml.messageMaxLen == maxUInt { return false } @@ -145,7 +145,7 @@ func (ml *TruncatingMethodLogger) truncateMessage(msgPb *pb.Message) (truncated // LogEntryConfig represents the configuration for binary log entry. type LogEntryConfig interface { - toProto() *pb.GrpcLogEntry + toProto() *binlogpb.GrpcLogEntry } // ClientHeader configs the binary log entry to be a ClientHeader entry. @@ -159,10 +159,10 @@ type ClientHeader struct { PeerAddr net.Addr } -func (c *ClientHeader) toProto() *pb.GrpcLogEntry { +func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry { // This function doesn't need to set all the fields (e.g. seq ID). The Log // function will set the fields when necessary. - clientHeader := &pb.ClientHeader{ + clientHeader := &binlogpb.ClientHeader{ Metadata: mdToMetadataProto(c.Header), MethodName: c.MethodName, Authority: c.Authority, @@ -170,16 +170,16 @@ func (c *ClientHeader) toProto() *pb.GrpcLogEntry { if c.Timeout > 0 { clientHeader.Timeout = ptypes.DurationProto(c.Timeout) } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, - Payload: &pb.GrpcLogEntry_ClientHeader{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER, + Payload: &binlogpb.GrpcLogEntry_ClientHeader{ ClientHeader: clientHeader, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -195,19 +195,19 @@ type ServerHeader struct { PeerAddr net.Addr } -func (c *ServerHeader) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, - Payload: &pb.GrpcLogEntry_ServerHeader{ - ServerHeader: &pb.ServerHeader{ +func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER, + Payload: &binlogpb.GrpcLogEntry_ServerHeader{ + ServerHeader: &binlogpb.ServerHeader{ Metadata: mdToMetadataProto(c.Header), }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -223,7 +223,7 @@ type ClientMessage struct { Message interface{} } -func (c *ClientMessage) toProto() *pb.GrpcLogEntry { +func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry { var ( data []byte err error @@ -238,19 +238,19 @@ func (c *ClientMessage) toProto() *pb.GrpcLogEntry { } else { grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE, + Payload: 
&binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ Length: uint32(len(data)), Data: data, }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -263,7 +263,7 @@ type ServerMessage struct { Message interface{} } -func (c *ServerMessage) toProto() *pb.GrpcLogEntry { +func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry { var ( data []byte err error @@ -278,19 +278,19 @@ func (c *ServerMessage) toProto() *pb.GrpcLogEntry { } else { grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte") } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, - Payload: &pb.GrpcLogEntry_Message{ - Message: &pb.Message{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE, + Payload: &binlogpb.GrpcLogEntry_Message{ + Message: &binlogpb.Message{ Length: uint32(len(data)), Data: data, }, }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -300,15 +300,15 @@ type ClientHalfClose struct { OnClientSide bool } -func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, +func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE, Payload: nil, // No payload here. } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -324,7 +324,7 @@ type ServerTrailer struct { PeerAddr net.Addr } -func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { +func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry { st, ok := status.FromError(c.Err) if !ok { grpclogLogger.Info("binarylogging: error in trailer is not a status error") @@ -340,10 +340,10 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err) } } - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, - Payload: &pb.GrpcLogEntry_Trailer{ - Trailer: &pb.Trailer{ + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER, + Payload: &binlogpb.GrpcLogEntry_Trailer{ + Trailer: &binlogpb.Trailer{ Metadata: mdToMetadataProto(c.Trailer), StatusCode: uint32(st.Code()), StatusMessage: st.Message(), @@ -352,9 +352,9 @@ func (c *ServerTrailer) toProto() *pb.GrpcLogEntry { }, } if c.OnClientSide { - ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } if c.PeerAddr != nil { ret.Peer = addrToProto(c.PeerAddr) @@ -367,15 +367,15 @@ type Cancel struct { OnClientSide bool } -func (c *Cancel) toProto() *pb.GrpcLogEntry { - ret := &pb.GrpcLogEntry{ - Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL, +func (c *Cancel) toProto() *binlogpb.GrpcLogEntry { + ret := &binlogpb.GrpcLogEntry{ + Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL, Payload: nil, } if c.OnClientSide { - ret.Logger = 
pb.GrpcLogEntry_LOGGER_CLIENT + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT } else { - ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER + ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER } return ret } @@ -392,15 +392,15 @@ func metadataKeyOmit(key string) bool { return strings.HasPrefix(key, "grpc-") } -func mdToMetadataProto(md metadata.MD) *pb.Metadata { - ret := &pb.Metadata{} +func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata { + ret := &binlogpb.Metadata{} for k, vv := range md { if metadataKeyOmit(k) { continue } for _, v := range vv { ret.Entry = append(ret.Entry, - &pb.MetadataEntry{ + &binlogpb.MetadataEntry{ Key: k, Value: []byte(v), }, @@ -410,26 +410,26 @@ func mdToMetadataProto(md metadata.MD) *pb.Metadata { return ret } -func addrToProto(addr net.Addr) *pb.Address { - ret := &pb.Address{} +func addrToProto(addr net.Addr) *binlogpb.Address { + ret := &binlogpb.Address{} switch a := addr.(type) { case *net.TCPAddr: if a.IP.To4() != nil { - ret.Type = pb.Address_TYPE_IPV4 + ret.Type = binlogpb.Address_TYPE_IPV4 } else if a.IP.To16() != nil { - ret.Type = pb.Address_TYPE_IPV6 + ret.Type = binlogpb.Address_TYPE_IPV6 } else { - ret.Type = pb.Address_TYPE_UNKNOWN + ret.Type = binlogpb.Address_TYPE_UNKNOWN // Do not set address and port fields. break } ret.Address = a.IP.String() ret.IpPort = uint32(a.Port) case *net.UnixAddr: - ret.Type = pb.Address_TYPE_UNIX + ret.Type = binlogpb.Address_TYPE_UNIX ret.Address = a.String() default: - ret.Type = pb.Address_TYPE_UNKNOWN + ret.Type = binlogpb.Address_TYPE_UNKNOWN } return ret } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go index c2fdd58b31982..264de387c2a5e 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go @@ -26,7 +26,7 @@ import ( "time" "github.com/golang/protobuf/proto" - pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" + binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1" ) var ( @@ -42,15 +42,15 @@ type Sink interface { // Write will be called to write the log entry into the sink. // // It should be thread-safe so it can be called in parallel. - Write(*pb.GrpcLogEntry) error + Write(*binlogpb.GrpcLogEntry) error // Close will be called when the Sink is replaced by a new Sink. Close() error } type noopSink struct{} -func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil } -func (ns *noopSink) Close() error { return nil } +func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil } +func (ns *noopSink) Close() error { return nil } // newWriterSink creates a binary log sink with the given writer. 
// @@ -66,7 +66,7 @@ type writerSink struct { out io.Writer } -func (ws *writerSink) Write(e *pb.GrpcLogEntry) error { +func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error { b, err := proto.Marshal(e) if err != nil { grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err) @@ -96,7 +96,7 @@ type bufferedSink struct { done chan struct{} } -func (fs *bufferedSink) Write(e *pb.GrpcLogEntry) error { +func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error { fs.mu.Lock() defer fs.mu.Unlock() if !fs.flusherStarted { diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go index ad0ce4dabf06c..7b2f350e2e645 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -273,10 +273,10 @@ func (c *channel) deleteSelfFromMap() (delete bool) { // deleteSelfIfReady tries to delete the channel itself from the channelz database. // The delete process includes two steps: -// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its -// parent's child list. -// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id -// will return entry not found error. +// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its +// parent's child list. +// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id +// will return entry not found error. func (c *channel) deleteSelfIfReady() { if !c.deleteSelfFromTree() { return @@ -381,10 +381,10 @@ func (sc *subChannel) deleteSelfFromMap() (delete bool) { // deleteSelfIfReady tries to delete the subchannel itself from the channelz database. // The delete process includes two steps: -// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from -// its parent's child list. -// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup -// by id will return entry not found error. +// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from +// its parent's child list. +// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup +// by id will return entry not found error. func (sc *subChannel) deleteSelfIfReady() { if !sc.deleteSelfFromTree() { return diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 6f02725431109..5ba9d94d49c2e 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -21,15 +21,42 @@ package envconfig import ( "os" + "strconv" "strings" ) -const ( - prefix = "GRPC_GO_" - txtErrIgnoreStr = prefix + "IGNORE_TXT_ERRORS" -) - var ( // TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false"). - TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false") + TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true) + // AdvertiseCompressors is set if registered compressor should be advertised + // ("GRPC_GO_ADVERTISE_COMPRESSORS" is not "false"). 
+ AdvertiseCompressors = boolFromEnv("GRPC_GO_ADVERTISE_COMPRESSORS", true) + // RingHashCap indicates the maximum ring size which defaults to 4096 + // entries but may be overridden by setting the environment variable + // "GRPC_RING_HASH_CAP". This does not override the default bounds + // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). + RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) ) + +func boolFromEnv(envVar string, def bool) bool { + if def { + // The default is true; return true unless the variable is "false". + return !strings.EqualFold(os.Getenv(envVar), "false") + } + // The default is false; return false unless the variable is "true". + return strings.EqualFold(os.Getenv(envVar), "true") +} + +func uint64FromEnv(envVar string, def, min, max uint64) uint64 { + v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64) + if err != nil { + return def + } + if v < min { + return min + } + if v > max { + return max + } + return v +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index af09711a3e88d..04136882c7bcb 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -20,7 +20,6 @@ package envconfig import ( "os" - "strings" ) const ( @@ -36,16 +35,6 @@ const ( // // When both bootstrap FileName and FileContent are set, FileName is used. XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG" - - ringHashSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" - clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT" - aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" - rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC" - outlierDetectionSupportEnv = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" - federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION" - rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB" - - c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI" ) var ( @@ -64,38 +53,40 @@ var ( // XDSRingHash indicates whether ring hash support is enabled, which can be // disabled by setting the environment variable // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". - XDSRingHash = !strings.EqualFold(os.Getenv(ringHashSupportEnv), "false") + XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true) // XDSClientSideSecurity is used to control processing of security // configuration on the client-side. // // Note that there is no env var protection for the server-side because we // have a brand new API on the server-side and users explicitly need to use // the new API to get security integration on the server. - XDSClientSideSecurity = !strings.EqualFold(os.Getenv(clientSideSecuritySupportEnv), "false") + XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true) // XDSAggregateAndDNS indicates whether processing of aggregated cluster // and DNS cluster is enabled, which can be enabled by setting the // environment variable // "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to // "true". 
- XDSAggregateAndDNS = !strings.EqualFold(os.Getenv(aggregateAndDNSSupportEnv), "false") + XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true) // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, // which can be disabled by setting the environment variable // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". - XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false") + XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true) // XDSOutlierDetection indicates whether outlier detection support is // enabled, which can be disabled by setting the environment variable // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false". - XDSOutlierDetection = !strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "false") - // XDSFederation indicates whether federation support is enabled. - XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true") + XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true) + // XDSFederation indicates whether federation support is enabled, which can + // be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true". + XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", false) // XDSRLS indicates whether processing of Cluster Specifier plugins and // support for the RLS CLuster Specifier is enabled, which can be enabled by // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to // "true". - XDSRLS = strings.EqualFold(os.Getenv(rlsInXDSEnv), "true") + XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", false) // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. - C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv) + C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") ) diff --git a/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go index e53b8ffc837f7..6e455fb0a8226 100644 --- a/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go +++ b/vendor/google.golang.org/grpc/internal/googlecloud/manufacturer_linux.go @@ -18,10 +18,10 @@ package googlecloud -import "io/ioutil" +import "os" const linuxProductNameFile = "/sys/class/dmi/id/product_name" func manufacturer() ([]byte, error) { - return ioutil.ReadFile(linuxProductNameFile) + return os.ReadFile(linuxProductNameFile) } diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go index 30a3b4258fc00..b68e26a364935 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -110,7 +110,7 @@ type LoggerV2 interface { // This is a copy of the DepthLoggerV2 defined in the external grpclog package. // It is defined here to avoid a circular dependency. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go new file mode 100644 index 0000000000000..6635f7bca96da --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go @@ -0,0 +1,32 @@ +/* + * + * Copyright 2022 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "sync" +) + +// OnceFunc returns a function wrapping f which ensures f is only executed +// once even if the returned function is executed multiple times. +func OnceFunc(f func()) func() { + var once sync.Once + return func() { + once.Do(f) + } +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go new file mode 100644 index 0000000000000..9f4090967980b --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go @@ -0,0 +1,47 @@ +/* + * + * Copyright 2022 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcutil + +import ( + "strings" + + "google.golang.org/grpc/internal/envconfig" +) + +// RegisteredCompressorNames holds names of the registered compressors. +var RegisteredCompressorNames []string + +// IsCompressorNameRegistered returns true when name is available in registry. +func IsCompressorNameRegistered(name string) bool { + for _, compressor := range RegisteredCompressorNames { + if compressor == name { + return true + } + } + return false +} + +// RegisteredCompressors returns a string of registered compressor names +// separated by comma. +func RegisteredCompressors() string { + if !envconfig.AdvertiseCompressors { + return "" + } + return strings.Join(RegisteredCompressorNames, ",") +} diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go index e9c4af64830c5..ec62b4775e5b4 100644 --- a/vendor/google.golang.org/grpc/internal/grpcutil/method.go +++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go @@ -25,7 +25,6 @@ import ( // ParseMethod splits service and method from the input. It expects format // "/service/method". -// func ParseMethod(methodName string) (service, method string, _ error) { if !strings.HasPrefix(methodName, "/") { return "", "", errors.New("invalid method name: should start with /") diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index fd0ee3dcaf1ef..0a76d9de6e027 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -77,6 +77,9 @@ var ( // ClearGlobalDialOptions clears the array of extra DialOption. This // method is useful in testing and benchmarking. 
ClearGlobalDialOptions func() + // JoinDialOptions combines the dial options passed as arguments into a + // single dial option. + JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption // JoinServerOptions combines the server options passed as arguments into a // single server option. JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 75301c514913c..09a667f33cb09 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -116,7 +116,7 @@ type dnsBuilder struct{} // Build creates and starts a DNS resolver that watches the name resolution of the target. func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { - host, port, err := parseTarget(target.Endpoint, defaultPort) + host, port, err := parseTarget(target.Endpoint(), defaultPort) if err != nil { return nil, err } @@ -140,10 +140,10 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts disableServiceConfig: opts.DisableServiceConfig, } - if target.Authority == "" { + if target.URL.Host == "" { d.resolver = defaultResolver } else { - d.resolver, err = customAuthorityResolver(target.Authority) + d.resolver, err = customAuthorityResolver(target.URL.Host) if err != nil { return nil, err } diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go index 520d9229e1ed7..afac56572ad55 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go +++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go @@ -20,13 +20,20 @@ // name without scheme back to gRPC as resolved address. 
package passthrough -import "google.golang.org/grpc/resolver" +import ( + "errors" + + "google.golang.org/grpc/resolver" +) const scheme = "passthrough" type passthroughBuilder struct{} func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + if target.Endpoint() == "" && opts.Dialer == nil { + return nil, errors.New("passthrough: received empty target in Build()") + } r := &passthroughResolver{ target: target, cc: cc, @@ -45,7 +52,7 @@ type passthroughResolver struct { } func (r *passthroughResolver) start() { - r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}}) + r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}}) } func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {} diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go index 7f1a702cacbef..1609116877383 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go +++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go @@ -34,8 +34,8 @@ type builder struct { } func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { - if target.Authority != "" { - return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.Authority) + if target.URL.Host != "" { + return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host) } // gRPC was parsing the dial target manually before PR #4817, and we diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go index badbdbf597f3f..51e733e495a36 100644 --- a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go @@ -67,10 +67,10 @@ func (bc *BalancerConfig) MarshalJSON() ([]byte, error) { // ServiceConfig contains a list of loadBalancingConfigs, each with a name and // config. This method iterates through that list in order, and stops at the // first policy that is supported. -// - If the config for the first supported policy is invalid, the whole service -// config is invalid. -// - If the list doesn't contain any supported policy, the whole service config -// is invalid. +// - If the config for the first supported policy is invalid, the whole service +// config is invalid. +// - If the list doesn't contain any supported policy, the whole service config +// is invalid. func (bc *BalancerConfig) UnmarshalJSON(b []byte) error { var ir intermediateBalancerConfig err := json.Unmarshal(b, &ir) diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index e5c6513edd138..b0ead4f54f82f 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -164,3 +164,13 @@ func (e *Error) Is(target error) bool { } return proto.Equal(e.s.s, tse.s.s) } + +// IsRestrictedControlPlaneCode returns whether the status includes a code +// restricted for control plane usage as defined by gRFC A54. 
+func IsRestrictedControlPlaneCode(s *Status) bool { + switch s.Code() { + case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss: + return true + } + return false +} diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index 409769f48fdcc..9097385e1a6a5 100644 --- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -191,7 +191,7 @@ type goAway struct { code http2.ErrCode debugData []byte headsUp bool - closeConn bool + closeConn error // if set, loopyWriter will exit, resulting in conn closure } func (*goAway) isTransportResponseFrame() bool { return false } @@ -209,6 +209,14 @@ type outFlowControlSizeRequest struct { func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false } +// closeConnection is an instruction to tell the loopy writer to flush the +// framer and exit, which will cause the transport's connection to be closed +// (by the client or server). The transport itself will close after the reader +// encounters the EOF caused by the connection closure. +type closeConnection struct{} + +func (closeConnection) isTransportResponseFrame() bool { return false } + type outStreamState int const ( @@ -408,7 +416,7 @@ func (c *controlBuffer) get(block bool) (interface{}, error) { select { case <-c.ch: case <-c.done: - return nil, ErrConnClosing + return nil, errors.New("transport closed by client") } } } @@ -519,18 +527,9 @@ const minBatchSize = 1000 // As an optimization, to increase the batch size for each flush, loopy yields the processor, once // if the batch size is too low to give stream goroutines a chance to fill it up. func (l *loopyWriter) run() (err error) { - defer func() { - if err == ErrConnClosing { - // Don't log ErrConnClosing as error since it happens - // 1. When the connection is closed by some other known issue. - // 2. User closed the connection. - // 3. A graceful close of connection. - if logger.V(logLevel) { - logger.Infof("transport: loopyWriter.run returning. %v", err) - } - err = nil - } - }() + // Always flush the writer before exiting in case there are pending frames + // to be sent. + defer l.framer.writer.Flush() for { it, err := l.cbuf.get(true) if err != nil { @@ -574,7 +573,6 @@ func (l *loopyWriter) run() (err error) { } l.framer.writer.Flush() break hasdata - } } } @@ -655,19 +653,20 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error { itl: &itemList{}, wq: h.wq, } - str.itl.enqueue(h) - return l.originateStream(str) + return l.originateStream(str, h) } -func (l *loopyWriter) originateStream(str *outStream) error { - hdr := str.itl.dequeue().(*headerFrame) - if err := hdr.initStream(str.id); err != nil { - if err == ErrConnClosing { - return err - } - // Other errors(errStreamDrain) need not close transport. +func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error { + // l.draining is set when handling GoAway. In which case, we want to avoid + // creating new streams. + if l.draining { + // TODO: provide a better error with the reason we are in draining. 
+ hdr.onOrphaned(errStreamDrain) return nil } + if err := hdr.initStream(str.id); err != nil { + return err + } if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { return err } @@ -763,8 +762,8 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { return err } } - if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { - return ErrConnClosing + if l.draining && len(l.estdStreams) == 0 { + return errors.New("finished processing active streams while in draining mode") } return nil } @@ -799,7 +798,7 @@ func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { if l.side == clientSide { l.draining = true if len(l.estdStreams) == 0 { - return ErrConnClosing + return errors.New("received GOAWAY with no active streams") } } return nil @@ -817,6 +816,13 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { return nil } +func (l *loopyWriter) closeConnectionHandler() error { + // Exit loopyWriter entirely by returning an error here. This will lead to + // the transport closing the connection, and, ultimately, transport + // closure. + return ErrConnClosing +} + func (l *loopyWriter) handle(i interface{}) error { switch i := i.(type) { case *incomingWindowUpdate: @@ -845,6 +851,8 @@ func (l *loopyWriter) handle(i interface{}) error { return l.goAwayHandler(i) case *outFlowControlSizeRequest: return l.outFlowControlSizeRequestHandler(i) + case closeConnection: + return l.closeConnectionHandler() default: return fmt.Errorf("transport: unknown control message type %T", i) } diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go index 9fa306b2e07a2..bc8ee07474968 100644 --- a/vendor/google.golang.org/grpc/internal/transport/defaults.go +++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go @@ -47,3 +47,9 @@ const ( defaultClientMaxHeaderListSize = uint32(16 << 20) defaultServerMaxHeaderListSize = uint32(16 << 20) ) + +// MaxStreamID is the upper bound for the stream ID before the current +// transport gracefully closes and new transport is created for subsequent RPCs. +// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit +// integer. It's exported so that tests can override it. +var MaxStreamID = uint32(math.MaxInt32 * 3 / 4) diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 090120925bb46..e6626bf96e7c5 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -46,24 +46,32 @@ import ( "google.golang.org/grpc/status" ) -// NewServerHandlerTransport returns a ServerTransport handling gRPC -// from inside an http.Handler. It requires that the http Server -// supports HTTP/2. +// NewServerHandlerTransport returns a ServerTransport handling gRPC from +// inside an http.Handler, or writes an HTTP error to w and returns an error. +// It requires that the http Server supports HTTP/2. 
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) { if r.ProtoMajor != 2 { - return nil, errors.New("gRPC requires HTTP/2") + msg := "gRPC requires HTTP/2" + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } if r.Method != "POST" { - return nil, errors.New("invalid gRPC request method") + msg := fmt.Sprintf("invalid gRPC request method %q", r.Method) + http.Error(w, msg, http.StatusBadRequest) + return nil, errors.New(msg) } contentType := r.Header.Get("Content-Type") // TODO: do we assume contentType is lowercase? we did before contentSubtype, validContentType := grpcutil.ContentSubtype(contentType) if !validContentType { - return nil, errors.New("invalid gRPC request content-type") + msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType) + http.Error(w, msg, http.StatusUnsupportedMediaType) + return nil, errors.New(msg) } if _, ok := w.(http.Flusher); !ok { - return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + msg := "gRPC requires a ResponseWriter supporting http.Flusher" + http.Error(w, msg, http.StatusInternalServerError) + return nil, errors.New(msg) } st := &serverHandlerTransport{ @@ -79,7 +87,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s if v := r.Header.Get("grpc-timeout"); v != "" { to, err := decodeTimeout(v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err) + msg := fmt.Sprintf("malformed grpc-timeout: %v", err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } st.timeoutSet = true st.timeout = to @@ -97,7 +107,9 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s for _, v := range vv { v, err := decodeMetadataHeader(k, v) if err != nil { - return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err) + msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err) + http.Error(w, msg, http.StatusBadRequest) + return nil, status.Error(codes.Internal, msg) } metakv = append(metakv, k, v) } @@ -141,12 +153,15 @@ type serverHandlerTransport struct { stats []stats.Handler } -func (ht *serverHandlerTransport) Close() { - ht.closeOnce.Do(ht.closeCloseChanOnce) +func (ht *serverHandlerTransport) Close(err error) { + ht.closeOnce.Do(func() { + if logger.V(logLevel) { + logger.Infof("Closing serverHandlerTransport: %v", err) + } + close(ht.closedCh) + }) } -func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } - func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } // strAddr is a net.Addr backed by either a TCP "ip:port" string, or @@ -236,7 +251,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro }) } } - ht.Close() + ht.Close(errors.New("finished writing status")) return err } @@ -346,7 +361,7 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace case <-ht.req.Context().Done(): } cancel() - ht.Close() + ht.Close(errors.New("request is done processing")) }() req := ht.req @@ -442,10 +457,10 @@ func (ht *serverHandlerTransport) Drain() { // mapRecvMsgError returns the non-nil err into the appropriate // error value as expected by callers of *grpc.parser.recvMsg. 
// In particular, in can only be: -// * io.EOF -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * an error from the status package +// - io.EOF +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package func mapRecvMsgError(err error) error { if err == io.EOF || err == io.ErrUnexpectedEOF { return err diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 5c2f35b24e75b..79ee8aea0a21a 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -38,8 +38,10 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" icredentials "google.golang.org/grpc/internal/credentials" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" @@ -57,11 +59,15 @@ var clientConnectionCounter uint64 // http2Client implements the ClientTransport interface with HTTP2. type http2Client struct { - lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. - ctx context.Context - cancel context.CancelFunc - ctxDone <-chan struct{} // Cache the ctx.Done() chan. - userAgent string + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + ctx context.Context + cancel context.CancelFunc + ctxDone <-chan struct{} // Cache the ctx.Done() chan. + userAgent string + // address contains the resolver returned address for this transport. + // If the `ServerName` field is set, it takes precedence over `CallHdr.Host` + // passed to `NewStream`, when determining the :authority header. + address resolver.Address md metadata.MD conn net.Conn // underlying communication channel loopy *loopyWriter @@ -99,16 +105,13 @@ type http2Client struct { maxSendHeaderListSize *uint32 bdpEst *bdpEstimator - // onPrefaceReceipt is a callback that client transport calls upon - // receiving server preface to signal that a succefull HTTP2 - // connection was established. - onPrefaceReceipt func() maxConcurrentStreams uint32 streamQuota int64 streamsQuotaAvailable chan struct{} waitingStreams uint32 nextID uint32 + registeredCompressors string // Do not access controlBuf with mu held. mu sync.Mutex // guard the following variables @@ -137,8 +140,7 @@ type http2Client struct { channelzID *channelz.Identifier czData *channelzData - onGoAway func(GoAwayReason) - onClose func() + onClose func(GoAwayReason) bufferPool *bufferPool @@ -194,7 +196,7 @@ func isTemporary(err error) bool { // newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 // and starts to receive messages on it. Non-nil error returns if construction // fails. 
-func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) { +func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ *http2Client, err error) { scheme := "http" ctx, cancel := context.WithCancel(ctx) defer func() { @@ -214,14 +216,40 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts if opts.FailOnNonTempDialError { return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) } - return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) + return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err) } + // Any further errors will close the underlying connection defer func(conn net.Conn) { if err != nil { conn.Close() } }(conn) + + // The following defer and goroutine monitor the connectCtx for cancelation + // and deadline. On context expiration, the connection is hard closed and + // this function will naturally fail as a result. Otherwise, the defer + // waits for the goroutine to exit to prevent the context from being + // monitored (and to prevent the connection from ever being closed) after + // returning from this function. + ctxMonitorDone := grpcsync.NewEvent() + newClientCtx, newClientDone := context.WithCancel(connectCtx) + defer func() { + newClientDone() // Awaken the goroutine below if connectCtx hasn't expired. + <-ctxMonitorDone.Done() // Wait for the goroutine below to exit. + }() + go func(conn net.Conn) { + defer ctxMonitorDone.Fire() // Signal this goroutine has exited. + <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes. + if err := connectCtx.Err(); err != nil { + // connectCtx expired before exiting the function. Hard close the connection. + if logger.V(logLevel) { + logger.Infof("newClientTransport: aborting due to connectCtx: %v", err) + } + conn.Close() + } + }(conn) + kp := opts.KeepaliveParams // Validate keepalive parameters. if kp.Time == 0 { @@ -253,15 +281,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } } if transportCreds != nil { - rawConn := conn - // Pull the deadline from the connectCtx, which will be used for - // timeouts in the authentication protocol handshake. Can ignore the - // boolean as the deadline will return the zero value, which will make - // the conn not timeout on I/O operations. - deadline, _ := connectCtx.Deadline() - rawConn.SetDeadline(deadline) - conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, rawConn) - rawConn.SetDeadline(time.Time{}) + conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn) if err != nil { return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) } @@ -299,6 +319,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ctxDone: ctx.Done(), // Cache Done chan. 
cancel: cancel, userAgent: opts.UserAgent, + registeredCompressors: grpcutil.RegisteredCompressors(), + address: addr, conn: conn, remoteAddr: conn.RemoteAddr(), localAddr: conn.LocalAddr(), @@ -315,16 +337,14 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts kp: kp, statsHandlers: opts.StatsHandlers, initialWindowSize: initialWindowSize, - onPrefaceReceipt: onPrefaceReceipt, nextID: 1, maxConcurrentStreams: defaultMaxStreamsClient, streamQuota: defaultMaxStreamsClient, streamsQuotaAvailable: make(chan struct{}, 1), czData: new(channelzData), - onGoAway: onGoAway, - onClose: onClose, keepaliveEnabled: keepaliveEnabled, bufferPool: newBufferPool(), + onClose: onClose, } // Add peer information to the http2client context. t.ctx = peer.NewContext(t.ctx, t.getPeer()) @@ -363,21 +383,32 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts t.kpDormancyCond = sync.NewCond(&t.mu) go t.keepalive() } - // Start the reader goroutine for incoming message. Each transport has - // a dedicated goroutine which reads HTTP2 frame from network. Then it - // dispatches the frame to the corresponding stream entity. - go t.reader() + + // Start the reader goroutine for incoming messages. Each transport has a + // dedicated goroutine which reads HTTP2 frames from the network. Then it + // dispatches the frame to the corresponding stream entity. When the + // server preface is received, readerErrCh is closed. If an error occurs + // first, an error is pushed to the channel. This must be checked before + // returning from this function. + readerErrCh := make(chan error, 1) + go t.reader(readerErrCh) + defer func() { + if err == nil { + err = <-readerErrCh + } + if err != nil { + t.Close(err) + } + }() // Send connection preface to server. n, err := t.conn.Write(clientPreface) if err != nil { err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err) - t.Close(err) return nil, err } if n != len(clientPreface) { err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) - t.Close(err) return nil, err } var ss []http2.Setting @@ -397,14 +428,12 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts err = t.framer.fr.WriteSettings(ss...) if err != nil { err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) - t.Close(err) return nil, err } // Adjust the connection flow control window if needed. if delta := uint32(icwz - defaultWindowSize); delta > 0 { if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { err = connectionErrorf(true, err, "transport: failed to write window update: %v", err) - t.Close(err) return nil, err } } @@ -417,10 +446,8 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts go func() { t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) err := t.loopy.run() - if err != nil { - if logger.V(logLevel) { - logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) - } + if logger.V(logLevel) { + logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err) } // Do not close the transport. Let reader goroutine handle it since // there might be data in the buffers. 
@@ -507,9 +534,22 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)}) } + registeredCompressors := t.registeredCompressors if callHdr.SendCompress != "" { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: callHdr.SendCompress}) + // Include the outgoing compressor name when compressor is not registered + // via encoding.RegisterCompressor. This is possible when client uses + // WithCompressor dial option. + if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) { + if registeredCompressors != "" { + registeredCompressors += "," + } + registeredCompressors += callHdr.SendCompress + } + } + + if registeredCompressors != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors}) } if dl, ok := ctx.Deadline(); ok { // Send out timeout regardless its value. The server can detect timeout context by itself. @@ -589,7 +629,11 @@ func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[s for _, c := range t.perRPCCreds { data, err := c.GetRequestMetadata(ctx, audience) if err != nil { - if _, ok := status.FromError(err); ok { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } return nil, err } @@ -618,7 +662,14 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call } data, err := callCreds.GetRequestMetadata(ctx, audience) if err != nil { - return nil, status.Errorf(codes.Internal, "transport: %v", err) + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err) + } + return nil, err + } + return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err) } callAuthData = make(map[string]string, len(data)) for k, v := range data { @@ -634,13 +685,13 @@ func (t *http2Client) getCallAuthData(ctx context.Context, audience string, call // NewStream errors result in transparent retry, as they mean nothing went onto // the wire. However, there are two notable exceptions: // -// 1. If the stream headers violate the max header list size allowed by the -// server. It's possible this could succeed on another transport, even if -// it's unlikely, but do not transparently retry. -// 2. If the credentials errored when requesting their headers. In this case, -// it's possible a retry can fix the problem, but indefinitely transparently -// retrying is not appropriate as it is likely the credentials, if they can -// eventually succeed, would need I/O to do so. +// 1. If the stream headers violate the max header list size allowed by the +// server. It's possible this could succeed on another transport, even if +// it's unlikely, but do not transparently retry. +// 2. If the credentials errored when requesting their headers. 
In this case, +// it's possible a retry can fix the problem, but indefinitely transparently +// retrying is not appropriate as it is likely the credentials, if they can +// eventually succeed, would need I/O to do so. type NewStreamError struct { Err error @@ -655,6 +706,18 @@ func (e NewStreamError) Error() string { // streams. All non-nil errors returned will be *NewStreamError. func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) { ctx = peer.NewContext(ctx, t.getPeer()) + + // ServerName field of the resolver returned address takes precedence over + // Host field of CallHdr to determine the :authority header. This is because, + // the ServerName field takes precedence for server authentication during + // TLS handshake, and the :authority header should match the value used + // for server authentication. + if t.address.ServerName != "" { + newCallHdr := *callHdr + newCallHdr.Host = t.address.ServerName + callHdr = &newCallHdr + } + headerFields, err := t.createHeaderFields(ctx, callHdr) if err != nil { return nil, &NewStreamError{Err: err, AllowTransparentRetry: false} @@ -679,15 +742,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, endStream: false, initStream: func(id uint32) error { t.mu.Lock() - if state := t.state; state != reachable { + // TODO: handle transport closure in loopy instead and remove this + // initStream is never called when transport is draining. + if t.state == closing { t.mu.Unlock() - // Do a quick cleanup. - err := error(errStreamDrain) - if state == closing { - err = ErrConnClosing - } - cleanup(err) - return err + cleanup(ErrConnClosing) + return ErrConnClosing } if channelz.IsOn() { atomic.AddInt64(&t.czData.streamsStarted, 1) @@ -705,6 +765,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, } firstTry := true var ch chan struct{} + transportDrainRequired := false checkForStreamQuota := func(it interface{}) bool { if t.streamQuota <= 0 { // Can go negative if server decreases it. if firstTry { @@ -720,6 +781,11 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, h := it.(*headerFrame) h.streamID = t.nextID t.nextID += 2 + + // Drain client transport if nextID > MaxStreamID which signals gRPC that + // the connection is closed and a new one must be created for subsequent RPCs. + transportDrainRequired = t.nextID > MaxStreamID + s.id = h.streamID s.fc = &inFlow{limit: uint32(t.initialWindowSize)} t.mu.Lock() @@ -799,6 +865,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, sh.HandleRPC(s.ctx, outHeader) } } + if transportDrainRequired { + if logger.V(logLevel) { + logger.Infof("transport: t.nextID > MaxStreamID. Draining") + } + t.GracefulClose() + } return s, nil } @@ -880,20 +952,21 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. // Close kicks off the shutdown process of the transport. This should be called // only once on a transport. Once it is called, the transport should not be // accessed any more. -// -// This method blocks until the addrConn that initiated this transport is -// re-connected. This happens because t.onClose() begins reconnect logic at the -// addrConn level and blocks until the addrConn is successfully connected. func (t *http2Client) Close(err error) { t.mu.Lock() - // Make sure we only Close once. + // Make sure we only close once. 
if t.state == closing { t.mu.Unlock() return } - // Call t.onClose before setting the state to closing to prevent the client - // from attempting to create new streams ASAP. - t.onClose() + if logger.V(logLevel) { + logger.Infof("transport: closing: %v", err) + } + // Call t.onClose ASAP to prevent the client from attempting to create new + // streams. + if t.state != draining { + t.onClose(GoAwayInvalid) + } t.state = closing streams := t.activeStreams t.activeStreams = nil @@ -943,11 +1016,15 @@ func (t *http2Client) GracefulClose() { t.mu.Unlock() return } + if logger.V(logLevel) { + logger.Infof("transport: GracefulClose called") + } + t.onClose(GoAwayInvalid) t.state = draining active := len(t.activeStreams) t.mu.Unlock() if active == 0 { - t.Close(ErrConnClosing) + t.Close(connectionErrorf(true, nil, "no active streams left to process while draining")) return } t.controlBuf.put(&incomingGoAway{}) @@ -1105,7 +1182,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { statusCode, ok := http2ErrConvTab[f.ErrCode] if !ok { if logger.V(logLevel) { - logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + logger.Warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error: %v", f.ErrCode) } statusCode = codes.Unknown } @@ -1223,8 +1300,10 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // Notify the clientconn about the GOAWAY before we set the state to // draining, to allow the client to stop attempting to create streams // before disallowing new streams on this connection. - t.onGoAway(t.goAwayReason) - t.state = draining + if t.state != draining { + t.onClose(t.goAwayReason) + t.state = draining + } } // All streams with IDs greater than the GoAwayId // and smaller than the previous GoAway ID should be killed. @@ -1482,33 +1561,35 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) } -// reader runs as a separate goroutine in charge of reading data from network -// connection. -// -// TODO(zhaoq): currently one reader per transport. Investigate whether this is -// optimal. -// TODO(zhaoq): Check the validity of the incoming frame sequence. -func (t *http2Client) reader() { - defer close(t.readerDone) - // Check the validity of server preface. +// readServerPreface reads and handles the initial settings frame from the +// server. +func (t *http2Client) readServerPreface() error { frame, err := t.framer.fr.ReadFrame() if err != nil { - err = connectionErrorf(true, err, "error reading server preface: %v", err) - t.Close(err) // this kicks off resetTransport, so must be last before return - return - } - t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!) 
- if t.keepaliveEnabled { - atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + return connectionErrorf(true, err, "error reading server preface: %v", err) } sf, ok := frame.(*http2.SettingsFrame) if !ok { - // this kicks off resetTransport, so must be last before return - t.Close(connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame)) - return + return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame) } - t.onPrefaceReceipt() t.handleSettings(sf, true) + return nil +} + +// reader verifies the server preface and reads all subsequent data from +// network connection. If the server preface is not read successfully, an +// error is pushed to errCh; otherwise errCh is closed with no error. +func (t *http2Client) reader(errCh chan<- error) { + defer close(t.readerDone) + + if err := t.readServerPreface(); err != nil { + errCh <- err + return + } + close(errCh) + if t.keepaliveEnabled { + atomic.StoreInt64(&t.lastRead, time.Now().UnixNano()) + } // loop to keep reading incoming messages on this transport. for { @@ -1711,3 +1792,9 @@ func (t *http2Client) getOutFlowWindow() int64 { return -2 } } + +func (t *http2Client) stateForTesting() transportState { + t.mu.Lock() + defer t.mu.Unlock() + return t.state +} diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 3dd15647bc84b..bc3da706726d5 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -21,6 +21,7 @@ package transport import ( "bytes" "context" + "errors" "fmt" "io" "math" @@ -41,6 +42,7 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -101,13 +103,13 @@ type http2Server struct { mu sync.Mutex // guard the following - // drainChan is initialized when Drain() is called the first time. - // After which the server writes out the first GoAway(with ID 2^31-1) frame. - // Then an independent goroutine will be launched to later send the second GoAway. - // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. - // Thus call to Drain() will be a no-op if drainChan is already initialized since draining is - // already underway. - drainChan chan struct{} + // drainEvent is initialized when Drain() is called the first time. After + // which the server writes out the first GoAway(with ID 2^31-1) frame. Then + // an independent goroutine will be launched to later send the second + // GoAway. During this time we don't want to write another first GoAway(with + // ID 2^31 -1) frame. Thus call to Drain() will be a no-op if drainEvent is + // already initialized since draining is already underway. + drainEvent *grpcsync.Event state transportState activeStreams map[uint32]*Stream // idle is the time instant when the connection went idle. 
@@ -293,7 +295,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, defer func() { if err != nil { - t.Close() + t.Close(err) } }() @@ -331,10 +333,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, go func() { t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler - if err := t.loopy.run(); err != nil { - if logger.V(logLevel) { - logger.Errorf("transport: loopyWriter.run returning. Err: %v", err) - } + err := t.loopy.run() + if logger.V(logLevel) { + logger.Infof("transport: loopyWriter exited. Closing connection. Err: %v", err) } t.conn.Close() t.controlBuf.finish() @@ -344,8 +345,9 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, return t, nil } -// operateHeader takes action on the decoded headers. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) { +// operateHeaders takes action on the decoded headers. Returns an error if fatal +// error encountered and transport needs to close, otherwise returns nil. +func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -361,15 +363,12 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rstCode: http2.ErrCodeFrameSize, onWrite: func() {}, }) - return false + return nil } if streamID%2 != 1 || streamID <= t.maxStreamID { // illegal gRPC stream id. - if logger.V(logLevel) { - logger.Errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) - } - return true + return fmt.Errorf("received an illegal stream id: %v. headers frame: %+v", streamID, frame) } t.maxStreamID = streamID @@ -381,13 +380,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( fc: &inFlow{limit: uint32(t.initialWindowSize)}, } var ( - // If a gRPC Response-Headers has already been received, then it means - // that the peer is speaking gRPC and we are in gRPC mode. - isGRPC = false - mdata = make(map[string][]string) - httpMethod string - // headerError is set if an error is encountered while parsing the headers - headerError bool + // if false, content-type was missing or invalid + isGRPC = false + contentType = "" + mdata = make(map[string][]string) + httpMethod string + // these are set if an error is encountered while parsing the headers + protocolError bool + headerError *status.Status timeoutSet bool timeout time.Duration @@ -398,6 +398,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( case "content-type": contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value) if !validContentType { + contentType = hf.Value break } mdata[hf.Name] = append(mdata[hf.Name], hf.Value) @@ -413,7 +414,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( timeoutSet = true var err error if timeout, err = decodeTimeout(hf.Value); err != nil { - headerError = true + headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err) } // "Transports must consider requests containing the Connection header // as malformed." 
- A41 @@ -421,14 +422,14 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( if logger.V(logLevel) { logger.Errorf("transport: http2Server.operateHeaders parsed a :connection header which makes a request malformed as per the HTTP/2 spec") } - headerError = true + protocolError = true default: if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) { break } v, err := decodeMetadataHeader(hf.Name, hf.Value) if err != nil { - headerError = true + headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err) logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err) break } @@ -447,23 +448,43 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( logger.Errorf("transport: %v", errMsg) } t.controlBuf.put(&earlyAbortStream{ - httpStatus: 400, + httpStatus: http.StatusBadRequest, streamID: streamID, contentSubtype: s.contentSubtype, status: status.New(codes.Internal, errMsg), rst: !frame.StreamEnded(), }) - return false + return nil } - if !isGRPC || headerError { + if protocolError { t.controlBuf.put(&cleanupStream{ streamID: streamID, rst: true, rstCode: http2.ErrCodeProtocol, onWrite: func() {}, }) - return false + return nil + } + if !isGRPC { + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusUnsupportedMediaType, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType), + rst: !frame.StreamEnded(), + }) + return nil + } + if headerError != nil { + t.controlBuf.put(&earlyAbortStream{ + httpStatus: http.StatusBadRequest, + streamID: streamID, + contentSubtype: s.contentSubtype, + status: headerError, + rst: !frame.StreamEnded(), + }) + return nil } // "If :authority is missing, Host must be renamed to :authority." - A41 @@ -503,7 +524,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( if t.state != reachable { t.mu.Unlock() s.cancel() - return false + return nil } if uint32(len(t.activeStreams)) >= t.maxStreams { t.mu.Unlock() @@ -514,7 +535,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( onWrite: func() {}, }) s.cancel() - return false + return nil } if httpMethod != http.MethodPost { t.mu.Unlock() @@ -530,7 +551,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( rst: !frame.StreamEnded(), }) s.cancel() - return false + return nil } if t.inTapHandle != nil { var err error @@ -550,7 +571,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( status: stat, rst: !frame.StreamEnded(), }) - return false + return nil } } t.activeStreams[streamID] = s @@ -597,7 +618,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( wq: s.wq, }) handle(s) - return false + return nil } // HandleStreams receives incoming streams using the given handler. This is @@ -630,19 +651,16 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. 
continue } if err == io.EOF || err == io.ErrUnexpectedEOF { - t.Close() + t.Close(err) return } - if logger.V(logLevel) { - logger.Warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) - } - t.Close() + t.Close(err) return } switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if t.operateHeaders(frame, handle, traceCtx) { - t.Close() + if err := t.operateHeaders(frame, handle, traceCtx); err != nil { + t.Close(err) break } case *http2.DataFrame: @@ -843,8 +861,8 @@ const ( func (t *http2Server) handlePing(f *http2.PingFrame) { if f.IsAck() { - if f.Data == goAwayPing.data && t.drainChan != nil { - close(t.drainChan) + if f.Data == goAwayPing.data && t.drainEvent != nil { + t.drainEvent.Fire() return } // Maybe it's a BDP ping. @@ -886,10 +904,7 @@ func (t *http2Server) handlePing(f *http2.PingFrame) { if t.pingStrikes > maxPingStrikes { // Send goaway and close the connection. - if logger.V(logLevel) { - logger.Errorf("transport: Got too many pings from the client, closing the connection.") - } - t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")}) } } @@ -1153,7 +1168,7 @@ func (t *http2Server) keepalive() { if logger.V(logLevel) { logger.Infof("transport: closing server transport due to maximum connection age.") } - t.Close() + t.controlBuf.put(closeConnection{}) case <-t.done: } return @@ -1169,10 +1184,7 @@ func (t *http2Server) keepalive() { continue } if outstandingPing && kpTimeoutLeft <= 0 { - if logger.V(logLevel) { - logger.Infof("transport: closing server transport due to idleness.") - } - t.Close() + t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Time)) return } if !outstandingPing { @@ -1199,12 +1211,15 @@ func (t *http2Server) keepalive() { // Close starts shutting down the http2Server transport. // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This // could cause some resource issue. Revisit this later. -func (t *http2Server) Close() { +func (t *http2Server) Close(err error) { t.mu.Lock() if t.state == closing { t.mu.Unlock() return } + if logger.V(logLevel) { + logger.Infof("transport: closing: %v", err) + } t.state = closing streams := t.activeStreams t.activeStreams = nil @@ -1295,10 +1310,10 @@ func (t *http2Server) RemoteAddr() net.Addr { func (t *http2Server) Drain() { t.mu.Lock() defer t.mu.Unlock() - if t.drainChan != nil { + if t.drainEvent != nil { return } - t.drainChan = make(chan struct{}) + t.drainEvent = grpcsync.NewEvent() t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) } @@ -1319,19 +1334,20 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { // Stop accepting more streams now. t.state = draining sid := t.maxStreamID + retErr := g.closeConn if len(t.activeStreams) == 0 { - g.closeConn = true + retErr = errors.New("second GOAWAY written and no active streams left to process") } t.mu.Unlock() t.maxStreamMu.Unlock() if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { return false, err } - if g.closeConn { + if retErr != nil { // Abruptly close the connection following the GoAway (via // loopywriter). But flush out what's inside the buffer first. 
t.framer.writer.Flush() - return false, fmt.Errorf("transport: Connection closing") + return false, retErr } return true, nil } @@ -1353,7 +1369,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { timer := time.NewTimer(time.Minute) defer timer.Stop() select { - case <-t.drainChan: + case <-t.drainEvent.Done(): case <-timer.C: case <-t.done: return diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 6c3ba8515940e..0ac77ea4f8c70 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -43,6 +43,10 @@ import ( "google.golang.org/grpc/tap" ) +// ErrNoHeaders is used as a signal that a trailers only response was received, +// and is not a real error. +var ErrNoHeaders = errors.New("stream has no headers") + const logLevel = 2 type bufferPool struct { @@ -366,9 +370,15 @@ func (s *Stream) Header() (metadata.MD, error) { return s.header.Copy(), nil } s.waitOnHeader() + if !s.headerValid { return nil, s.status.Err() } + + if s.noHeaders { + return nil, ErrNoHeaders + } + return s.header.Copy(), nil } @@ -573,8 +583,8 @@ type ConnectOptions struct { // NewClientTransport establishes the transport with the required ConnectOptions // and returns it to the caller. -func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) { - return newHTTP2Client(connectCtx, ctx, addr, opts, onPrefaceReceipt, onGoAway, onClose) +func NewClientTransport(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, addr, opts, onClose) } // Options provides additional hints and information for message @@ -691,7 +701,7 @@ type ServerTransport interface { // Close tears down the transport. Once it is called, the transport // should not be accessed any more. All the pending streams and their // handlers will be terminated asynchronously. - Close() + Close(err error) // RemoteAddr returns the remote network address. RemoteAddr() net.Addr diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go index 98d62e0675f61..fb4a88f59bd3c 100644 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -41,10 +41,11 @@ type MD map[string][]string // New creates an MD from a given key-value map. // // Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// // Uppercase letters are automatically converted to lowercase. // // Keys beginning with "grpc-" are reserved for grpc-internal use only and may @@ -62,10 +63,11 @@ func New(m map[string]string) MD { // Pairs panics if len(kv) is odd. // // Only the following ASCII characters are allowed in keys: -// - digits: 0-9 -// - uppercase letters: A-Z (normalized to lower) -// - lowercase letters: a-z -// - special characters: -_. +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. 
+// // Uppercase letters are automatically converted to lowercase. // // Keys beginning with "grpc-" are reserved for grpc-internal use only and may @@ -196,7 +198,7 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { // ValueFromIncomingContext returns the metadata value corresponding to the metadata // key from the incoming metadata if it exists. Key must be lower-case. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index 843633c910a11..c525dc070fc6d 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/channelz" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/status" ) @@ -57,12 +58,18 @@ func (pw *pickerWrapper) updatePicker(p balancer.Picker) { pw.mu.Unlock() } -func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) { +// doneChannelzWrapper performs the following: +// - increments the calls started channelz counter +// - wraps the done function in the passed in result to increment the calls +// failed or calls succeeded channelz counter before invoking the actual +// done function. +func doneChannelzWrapper(acw *acBalancerWrapper, result *balancer.PickResult) { acw.mu.Lock() ac := acw.ac acw.mu.Unlock() ac.incrCallsStarted() - return func(b balancer.DoneInfo) { + done := result.Done + result.Done = func(b balancer.DoneInfo) { if b.Err != nil && b.Err != io.EOF { ac.incrCallsFailed() } else { @@ -81,7 +88,7 @@ func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) f // - the current picker returns other errors and failfast is false. // - the subConn returned by the current picker is not READY // When one of these situations happens, pick blocks until the picker gets updated. -func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, func(balancer.DoneInfo), error) { +func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (transport.ClientTransport, balancer.PickResult, error) { var ch chan struct{} var lastPickErr error @@ -89,7 +96,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. pw.mu.Lock() if pw.done { pw.mu.Unlock() - return nil, nil, ErrClientConnClosing + return nil, balancer.PickResult{}, ErrClientConnClosing } if pw.picker == nil { @@ -110,9 +117,9 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } switch ctx.Err() { case context.DeadlineExceeded: - return nil, nil, status.Error(codes.DeadlineExceeded, errStr) + return nil, balancer.PickResult{}, status.Error(codes.DeadlineExceeded, errStr) case context.Canceled: - return nil, nil, status.Error(codes.Canceled, errStr) + return nil, balancer.PickResult{}, status.Error(codes.Canceled, errStr) } case <-ch: } @@ -124,14 +131,17 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. 
pw.mu.Unlock() pickResult, err := p.Pick(info) - if err != nil { if err == balancer.ErrNoSubConnAvailable { continue } - if _, ok := status.FromError(err); ok { + if st, ok := status.FromError(err); ok { // Status error: end the RPC unconditionally with this status. - return nil, nil, dropError{error: err} + // First restrict the code to the list allowed by gRFC A54. + if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err) + } + return nil, balancer.PickResult{}, dropError{error: err} } // For all other errors, wait for ready RPCs should block and other // RPCs should fail with unavailable. @@ -139,7 +149,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. lastPickErr = err continue } - return nil, nil, status.Error(codes.Unavailable, err.Error()) + return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error()) } acw, ok := pickResult.SubConn.(*acBalancerWrapper) @@ -149,9 +159,10 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. } if t := acw.getAddrConn().getReadyTransport(); t != nil { if channelz.IsOn() { - return t, doneChannelzWrapper(acw, pickResult.Done), nil + doneChannelzWrapper(acw, &pickResult) + return t, pickResult, nil } - return t, pickResult.Done, nil + return t, pickResult, nil } if pickResult.Done != nil { // Calling done with nil error, no bytes sent and no bytes received. diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go index fb7a99e0a2734..fc91b4d266de2 100644 --- a/vendor/google.golang.org/grpc/pickfirst.go +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -51,7 +51,7 @@ type pickfirstBalancer struct { func (b *pickfirstBalancer) ResolverError(err error) { if logger.V(2) { - logger.Infof("pickfirstBalancer: ResolverError called with error %v", err) + logger.Infof("pickfirstBalancer: ResolverError called with error: %v", err) } if b.subConn == nil { b.state = connectivity.TransientFailure @@ -102,8 +102,8 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState b.subConn = subConn b.state = connectivity.Idle b.cc.UpdateState(balancer.State{ - ConnectivityState: connectivity.Idle, - Picker: &picker{result: balancer.PickResult{SubConn: b.subConn}}, + ConnectivityState: connectivity.Connecting, + Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) b.subConn.Connect() return nil diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go index 0a1e975ad9164..cd45547854f07 100644 --- a/vendor/google.golang.org/grpc/preloader.go +++ b/vendor/google.golang.org/grpc/preloader.go @@ -25,7 +25,7 @@ import ( // PreparedMsg is responsible for creating a Marshalled and Compressed object. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. 
diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh index 99db79fafcfb8..a6f26c8ab0f03 100644 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -57,7 +57,8 @@ LEGACY_SOURCES=( ${WORKDIR}/grpc-proto/grpc/health/v1/health.proto ${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto profiling/proto/service.proto - reflection/grpc_reflection_v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto + ${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto ) # Generates only the new gRPC Service symbols @@ -119,8 +120,4 @@ mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/ # see grpc_testing_not_regenerate/README.md for details. rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go -# grpc/testing does not have a go_package option. -mv ${WORKDIR}/out/grpc/testing/*.pb.go interop/grpc_testing/ -mv ${WORKDIR}/out/grpc/core/*.pb.go interop/grpc_testing/core/ - cp -R ${WORKDIR}/out/google.golang.org/grpc/* . diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index ca2e35a3596f7..654e9ce69f4a8 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -24,6 +24,7 @@ import ( "context" "net" "net/url" + "strings" "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" @@ -96,7 +97,7 @@ const ( // Address represents a server the client connects to. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -236,20 +237,17 @@ type ClientConn interface { // // Examples: // -// - "dns://some_authority/foo.bar" -// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} -// - "foo.bar" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} -// - "unknown_scheme://authority/endpoint" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} +// - "dns://some_authority/foo.bar" +// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} +// - "foo.bar" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} +// - "unknown_scheme://authority/endpoint" +// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} type Target struct { // Deprecated: use URL.Scheme instead. Scheme string // Deprecated: use URL.Host instead. Authority string - // Deprecated: use URL.Path or URL.Opaque instead. The latter is set when - // the former is empty. - Endpoint string // URL contains the parsed dial target with an optional default scheme added // to it if the original dial target contained no scheme or contained an // unregistered scheme. Any query params specified in the original dial @@ -257,6 +255,24 @@ type Target struct { URL url.URL } +// Endpoint retrieves endpoint without leading "/" from either `URL.Path` +// or `URL.Opaque`. The latter is used when the former is empty. +func (t Target) Endpoint() string { + endpoint := t.URL.Path + if endpoint == "" { + endpoint = t.URL.Opaque + } + // For targets of the form "[scheme]://[authority]/endpoint, the endpoint + // value returned from url.Parse() contains a leading "/". 
Although this is + // in accordance with RFC 3986, we do not want to break existing resolver + // implementations which expect the endpoint without the leading "/". So, we + // end up stripping the leading "/" here. But this will result in an + // incorrect parsing for something like "unix:///path/to/socket". Since we + // own the "unix" resolver, we can workaround in the unix resolver by using + // the `URL` field. + return strings.TrimPrefix(endpoint, "/") +} + // Builder creates a resolver that will be used to watch name resolution updates. type Builder interface { // Build creates a new resolver for the given target. diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 5d407b004b0ed..cb7020ebecd78 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -25,7 +25,6 @@ import ( "encoding/binary" "fmt" "io" - "io/ioutil" "math" "strings" "sync" @@ -77,7 +76,7 @@ func NewGZIPCompressorWithLevel(level int) (Compressor, error) { return &gzipCompressor{ pool: sync.Pool{ New: func() interface{} { - w, err := gzip.NewWriterLevel(ioutil.Discard, level) + w, err := gzip.NewWriterLevel(io.Discard, level) if err != nil { panic(err) } @@ -143,7 +142,7 @@ func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { z.Close() d.pool.Put(z) }() - return ioutil.ReadAll(z) + return io.ReadAll(z) } func (d *gzipDecompressor) Type() string { @@ -198,7 +197,7 @@ func Header(md *metadata.MD) CallOption { // HeaderCallOption is a CallOption for collecting response header metadata. // The metadata field will be populated *after* the RPC completes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -220,7 +219,7 @@ func Trailer(md *metadata.MD) CallOption { // TrailerCallOption is a CallOption for collecting response trailer metadata. // The metadata field will be populated *after* the RPC completes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -242,7 +241,7 @@ func Peer(p *peer.Peer) CallOption { // PeerCallOption is a CallOption for collecting the identity of the remote // peer. The peer field will be populated *after* the RPC completes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -282,7 +281,7 @@ func FailFast(failFast bool) CallOption { // FailFastCallOption is a CallOption for indicating whether an RPC should fail // fast or not. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -297,7 +296,8 @@ func (o FailFastCallOption) before(c *callInfo) error { func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxCallRecvMsgSize returns a CallOption which sets the maximum message size -// in bytes the client can receive. +// in bytes the client can receive. If this is not set, gRPC uses the default +// 4MB. func MaxCallRecvMsgSize(bytes int) CallOption { return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes} } @@ -305,7 +305,7 @@ func MaxCallRecvMsgSize(bytes int) CallOption { // MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message // size in bytes the client can receive. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. 
@@ -320,7 +320,8 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxCallSendMsgSize returns a CallOption which sets the maximum message size -// in bytes the client can send. +// in bytes the client can send. If this is not set, gRPC uses the default +// `math.MaxInt32`. func MaxCallSendMsgSize(bytes int) CallOption { return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes} } @@ -328,7 +329,7 @@ func MaxCallSendMsgSize(bytes int) CallOption { // MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message // size in bytes the client can send. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -351,7 +352,7 @@ func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { // PerRPCCredsCallOption is a CallOption that indicates the per-RPC // credentials to use for the call. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -369,7 +370,7 @@ func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {} // sending the request. If WithCompressor is also set, UseCompressor has // higher priority. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -379,7 +380,7 @@ func UseCompressor(name string) CallOption { // CompressorCallOption is a CallOption that indicates the compressor to use. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -416,7 +417,7 @@ func CallContentSubtype(contentSubtype string) CallOption { // ContentSubtypeCallOption is a CallOption that indicates the content-subtype // used for marshaling messages. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -444,7 +445,7 @@ func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {} // This function is provided for advanced users; prefer to use only // CallContentSubtype to select a registered codec instead. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. @@ -455,7 +456,7 @@ func ForceCodec(codec encoding.Codec) CallOption { // ForceCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -480,7 +481,7 @@ func CallCustomCodec(codec Codec) CallOption { // CustomCodecCallOption is a CallOption that indicates the codec used for // marshaling messages. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -497,7 +498,7 @@ func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {} // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory // used for buffering this RPC's requests for retry purposes. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. 
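// [Editor's illustrative sketch; not part of the vendored diff.] UseCompressor,
// documented above, selects a registered compressor by name for one RPC. A
// blank import of google.golang.org/grpc/encoding/gzip registers the "gzip"
// compressor as a side effect; the client and message types below are
// hypothetical placeholders.
//
//	import _ "google.golang.org/grpc/encoding/gzip"
func callCompressed(ctx context.Context, client pb.GreeterClient, req *pb.HelloRequest) (*pb.HelloReply, error) {
	return client.SayHello(ctx, req, grpc.UseCompressor("gzip"))
}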
@@ -508,7 +509,7 @@ func MaxRetryRPCBufferSize(bytes int) CallOption { // MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of // memory to be used for caching this RPC for retry purposes. // -// Experimental +// # Experimental // // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. @@ -548,10 +549,11 @@ type parser struct { // format. The caller owns the returned msg memory. // // If there is an error, possible values are: -// * io.EOF, when no messages remain -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * an error from the status package +// - io.EOF, when no messages remain +// - io.ErrUnexpectedEOF +// - of type transport.ConnectionError +// - an error from the status package +// // No other error values or types must be returned, which also means // that the underlying io.Reader must not return an incompatible // error. @@ -710,7 +712,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei d, size, err = decompress(compressor, d, maxReceiveMessageSize) } if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) } if size > maxReceiveMessageSize { // TODO: Revisit the error code. Currently keep it consistent with java @@ -745,7 +747,7 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize } // Read from LimitReader with limit max+1. So if the underlying // reader is over limit, the result will be bigger than max. - d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) + d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1)) return d, len(d), err } @@ -758,7 +760,7 @@ func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interf return err } if err := c.Unmarshal(d, m); err != nil { - return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) + return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } if payInfo != nil { payInfo.uncompressedBytes = d diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index f4dde72b41f89..d5a6e78be44d0 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -233,10 +233,11 @@ func newJoinServerOption(opts ...ServerOption) ServerOption { return &joinServerOption{opts: opts} } -// WriteBufferSize determines how much data can be batched before doing a write on the wire. -// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low. -// The default value for this buffer is 32KB. -// Zero will disable the write buffer such that each write will be on underlying connection. +// WriteBufferSize determines how much data can be batched before doing a write +// on the wire. The corresponding memory allocation for this buffer will be +// twice the size to keep syscalls low. The default value for this buffer is +// 32KB. Zero or negative values will disable the write buffer such that each +// write will be on underlying connection. // Note: A Send call may not directly translate to a write. 
func WriteBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { @@ -244,11 +245,10 @@ func WriteBufferSize(s int) ServerOption { }) } -// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most -// for one read syscall. -// The default value for this buffer is 32KB. -// Zero will disable read buffer for a connection so data framer can access the underlying -// conn directly. +// ReadBufferSize lets you set the size of read buffer, this determines how much +// data can be read at most for one read syscall. The default value for this +// buffer is 32KB. Zero or negative values will disable read buffer for a +// connection so data framer can access the underlying conn directly. func ReadBufferSize(s int) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.readBufferSize = s @@ -942,7 +942,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { } func (s *Server) serveStreams(st transport.ServerTransport) { - defer st.Close() + defer st.Close(errors.New("finished serving streams for the server transport")) var wg sync.WaitGroup var roundRobinCounter uint32 @@ -1008,7 +1008,8 @@ var _ http.Handler = (*Server)(nil) func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers) if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + // Errors returned from transport.NewServerHandlerTransport have + // already been written to w. return } if !s.addConn(listenerAddressForServeHTTP, st) { @@ -1046,7 +1047,7 @@ func (s *Server) addConn(addr string, st transport.ServerTransport) bool { s.mu.Lock() defer s.mu.Unlock() if s.conns == nil { - st.Close() + st.Close(errors.New("Server.addConn called when server has already been stopped")) return false } if s.drain { @@ -1150,21 +1151,16 @@ func chainUnaryServerInterceptors(s *Server) { func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { - // the struct ensures the variables are allocated together, rather than separately, since we - // know they should be garbage collected together. This saves 1 allocation and decreases - // time/call by about 10% on the microbenchmark. - var state struct { - i int - next UnaryHandler - } - state.next = func(ctx context.Context, req interface{}) (interface{}, error) { - if state.i == len(interceptors)-1 { - return interceptors[state.i](ctx, req, info, handler) - } - state.i++ - return interceptors[state.i-1](ctx, req, info, state.next) - } - return state.next(ctx, req) + return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) + } +} + +func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(ctx context.Context, req interface{}) (interface{}, error) { + return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) } } @@ -1303,7 +1299,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { - channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status %v", e) + channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) } return err } @@ -1470,21 +1466,16 @@ func chainStreamServerInterceptors(s *Server) { func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { - // the struct ensures the variables are allocated together, rather than separately, since we - // know they should be garbage collected together. This saves 1 allocation and decreases - // time/call by about 10% on the microbenchmark. - var state struct { - i int - next StreamHandler - } - state.next = func(srv interface{}, ss ServerStream) error { - if state.i == len(interceptors)-1 { - return interceptors[state.i](srv, ss, info, handler) - } - state.i++ - return interceptors[state.i-1](srv, ss, info, state.next) - } - return state.next(srv, ss) + return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) + } +} + +func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler { + if curr == len(interceptors)-1 { + return finalHandler + } + return func(srv interface{}, stream ServerStream) error { + return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) } } @@ -1819,7 +1810,7 @@ func (s *Server) Stop() { } for _, cs := range conns { for st := range cs { - st.Close() + st.Close(errors.New("Server.Stop called")) } } if s.opts.numServerWorkers > 0 { diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index 01bbb2025aed4..f22acace4253c 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -226,7 +226,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { var rsc jsonSC err := json.Unmarshal([]byte(js), &rsc) if err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } sc := ServiceConfig{ @@ -254,7 +254,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { } d, err := parseDuration(m.Timeout) if err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } @@ -263,7 +263,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { Timeout: d, } if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) return &serviceconfig.ParseResult{Err: err} } if m.MaxRequestMessageBytes != nil { @@ -283,13 +283,13 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { for i, n := range *m.Name { path, err := n.generatePath() if err != nil { - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to 
methodConfig[%d]: %v", js, i, err) + logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) return &serviceconfig.ParseResult{Err: err} } if _, ok := paths[path]; ok { err = errDuplicatedName - logger.Warningf("grpc: parseServiceConfig error unmarshaling %s due to methodConfig[%d]: %v", js, i, err) + logger.Warningf("grpc: error unmarshaling service config %s due to methodConfig[%d]: %v", js, i, err) return &serviceconfig.ParseResult{Err: err} } paths[path] = struct{}{} diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go index 73a2f926613e4..35e7a20a04baa 100644 --- a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go +++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go @@ -19,7 +19,7 @@ // Package serviceconfig defines types and methods for operating on gRPC // service configs. // -// Experimental +// # Experimental // // Notice: This package is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go index 6d163b6e38422..623be39f26bab 100644 --- a/vendor/google.golang.org/grpc/status/status.go +++ b/vendor/google.golang.org/grpc/status/status.go @@ -76,14 +76,14 @@ func FromProto(s *spb.Status) *Status { // FromError returns a Status representation of err. // -// - If err was produced by this package or implements the method `GRPCStatus() -// *Status`, the appropriate Status is returned. +// - If err was produced by this package or implements the method `GRPCStatus() +// *Status`, the appropriate Status is returned. // -// - If err is nil, a Status is returned with codes.OK and no message. +// - If err is nil, a Status is returned with codes.OK and no message. // -// - Otherwise, err is an error not compatible with this package. In this -// case, a Status is returned with codes.Unknown and err's Error() message, -// and ok is false. +// - Otherwise, err is an error not compatible with this package. In this +// case, a Status is returned with codes.Unknown and err's Error() message, +// and ok is false. func FromError(err error) (s *Status, ok bool) { if err == nil { return nil, true diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go index 0c16cfb2ea807..93231af2ac569 100644 --- a/vendor/google.golang.org/grpc/stream.go +++ b/vendor/google.golang.org/grpc/stream.go @@ -39,6 +39,7 @@ import ( imetadata "google.golang.org/grpc/internal/metadata" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/serviceconfig" + istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/metadata" "google.golang.org/grpc/peer" @@ -195,6 +196,13 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method} rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo) if err != nil { + if st, ok := status.FromError(err); ok { + // Restrict the code to the list allowed by gRFC A54. 
+ if istatus.IsRestrictedControlPlaneCode(st) { + err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err) + } + return nil, err + } return nil, toRPCErr(err) } @@ -408,7 +416,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) ctx = trace.NewContext(ctx, trInfo.tr) } - if cs.cc.parsedTarget.Scheme == "xds" { + if cs.cc.parsedTarget.URL.Scheme == "xds" { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( @@ -430,7 +438,7 @@ func (a *csAttempt) getTransport() error { cs := a.cs var err error - a.t, a.done, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) + a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) if err != nil { if de, ok := err.(dropError); ok { err = de.error @@ -447,6 +455,25 @@ func (a *csAttempt) getTransport() error { func (a *csAttempt) newStream() error { cs := a.cs cs.callHdr.PreviousAttempts = cs.numRetries + + // Merge metadata stored in PickResult, if any, with existing call metadata. + // It is safe to overwrite the csAttempt's context here, since all state + // maintained in it are local to the attempt. When the attempt has to be + // retried, a new instance of csAttempt will be created. + if a.pickResult.Metatada != nil { + // We currently do not have a function it the metadata package which + // merges given metadata with existing metadata in a context. Existing + // function `AppendToOutgoingContext()` takes a variadic argument of key + // value pairs. + // + // TODO: Make it possible to retrieve key value pairs from metadata.MD + // in a form passable to AppendToOutgoingContext(), or create a version + // of AppendToOutgoingContext() that accepts a metadata.MD. + md, _ := metadata.FromOutgoingContext(a.ctx) + md = metadata.Join(md, a.pickResult.Metatada) + a.ctx = metadata.NewOutgoingContext(a.ctx, md) + } + s, err := a.t.NewStream(a.ctx, cs.callHdr) if err != nil { nse, ok := err.(*transport.NewStreamError) @@ -521,12 +548,12 @@ type clientStream struct { // csAttempt implements a single transport stream attempt within a // clientStream. type csAttempt struct { - ctx context.Context - cs *clientStream - t transport.ClientTransport - s *transport.Stream - p *parser - done func(balancer.DoneInfo) + ctx context.Context + cs *clientStream + t transport.ClientTransport + s *transport.Stream + p *parser + pickResult balancer.PickResult finished bool dc Decompressor @@ -744,17 +771,25 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) func (cs *clientStream) Header() (metadata.MD, error) { var m metadata.MD + noHeader := false err := cs.withRetry(func(a *csAttempt) error { var err error m, err = a.s.Header() + if err == transport.ErrNoHeaders { + noHeader = true + return nil + } return toRPCErr(err) }, cs.commitAttemptLocked) + if err != nil { cs.finish(err) return nil, err } - if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { - // Only log if binary log is on and header has not been logged. + + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader { + // Only log if binary log is on and header has not been logged, and + // there is actually headers to log. 
logEntry := &binarylog.ServerHeader{ OnClientSide: true, Header: m, @@ -1087,12 +1122,12 @@ func (a *csAttempt) finish(err error) { tr = a.s.Trailer() } - if a.done != nil { + if a.pickResult.Done != nil { br := false if a.s != nil { br = a.s.BytesReceived() } - a.done(balancer.DoneInfo{ + a.pickResult.Done(balancer.DoneInfo{ Err: err, Trailer: tr, BytesSent: a.s != nil, @@ -1448,6 +1483,9 @@ type ServerStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. + // + // It is not safe to modify the message after calling SendMsg. Tracing + // libraries and stats handlers may use the message lazily. SendMsg(m interface{}) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the client has performed a CloseSend. On diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go index dbf34e6bb5f57..bfa5dfa40e4d1 100644 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -19,7 +19,7 @@ // Package tap defines the function handles which are executed on the transport // layer of gRPC-Go and related information. // -// Experimental +// # Experimental // // Notice: This API is EXPERIMENTAL and may be changed or removed in a // later release. diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index d472ca64307bb..fe552c315be22 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.50.1" +const Version = "1.53.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index c3fc8253b13a6..3728aed04fc74 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -66,8 +66,21 @@ elif [[ "$#" -ne 0 ]]; then die "Unknown argument(s): $*" fi +# - Check that generated proto files are up to date. +if [[ -z "${VET_SKIP_PROTO}" ]]; then + PATH="/home/travis/bin:${PATH}" make proto && \ + git status --porcelain 2>&1 | fail_on_output || \ + (git status; git --no-pager diff; exit 1) +fi + +if [[ -n "${VET_ONLY_PROTO}" ]]; then + exit 0 +fi + # - Ensure all source files contain a copyright message. -not git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go' +# (Done in two parts because Darwin "git grep" has broken support for compound +# exclusion matches.) +(grep -L "DO NOT EDIT" $(git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)" -- '*.go') || true) | fail_on_output # - Make sure all tests in grpc and grpc/test use leakcheck via Teardown. not grep 'func Test[^(]' *_test.go @@ -81,7 +94,7 @@ not git grep -l 'x/net/context' -- "*.go" git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' # - Do not call grpclog directly. Use grpclog.Component instead. -git grep -l 'grpclog.I\|grpclog.W\|grpclog.E\|grpclog.F\|grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' +git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' # - Ensure all ptypes proto packages are renamed when importing. 
not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" @@ -91,13 +104,6 @@ git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*. misspell -error . -# - Check that generated proto files are up to date. -if [[ -z "${VET_SKIP_PROTO}" ]]; then - PATH="/home/travis/bin:${PATH}" make proto && \ - git status --porcelain 2>&1 | fail_on_output || \ - (git status; git --no-pager diff; exit 1) -fi - # - gofmt, goimports, golint (with exceptions for generated code), go vet, # go mod tidy. # Perform these checks on each module inside gRPC. @@ -109,7 +115,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do goimports -l . 2>&1 | not grep -vE "\.pb\.go" golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" - go mod tidy + go mod tidy -compat=1.17 git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) popd @@ -119,8 +125,9 @@ done # # TODO(dfawley): don't use deprecated functions in examples or first-party # plugins. +# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs. SC_OUT="$(mktemp)" -staticcheck -go 1.9 -checks 'inherit,-ST1015' ./... > "${SC_OUT}" || true +staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... > "${SC_OUT}" || true # Error if anything other than deprecation warnings are printed. not grep -v "is deprecated:.*SA1019" "${SC_OUT}" # Only ignore the following deprecated types/fields/functions. diff --git a/vendor/k8s.io/klog/v2/contextual.go b/vendor/k8s.io/klog/v2/contextual.go index 2428963c0e267..005513f2a7696 100644 --- a/vendor/k8s.io/klog/v2/contextual.go +++ b/vendor/k8s.io/klog/v2/contextual.go @@ -70,11 +70,14 @@ func SetLogger(logger logr.Logger) { // routing log entries through klogr into klog and then into the actual Logger // backend. func SetLoggerWithOptions(logger logr.Logger, opts ...LoggerOption) { - logging.logger = &logger logging.loggerOptions = loggerOptions{} for _, opt := range opts { opt(&logging.loggerOptions) } + logging.logger = &logWriter{ + Logger: logger, + writeKlogBuffer: logging.loggerOptions.writeKlogBuffer, + } } // ContextualLogger determines whether the logger passed to @@ -93,6 +96,22 @@ func FlushLogger(flush func()) LoggerOption { } } +// WriteKlogBuffer sets a callback that will be invoked by klog to write output +// produced by non-structured log calls like Infof. +// +// The buffer will contain exactly the same data that klog normally would write +// into its own output stream(s). In particular this includes the header, if +// klog is configured to write one. The callback then can divert that data into +// its own output streams. The buffer may or may not end in a line break. +// +// Without such a callback, klog will call the logger's Info or Error method +// with just the message string (i.e. no header). +func WriteKlogBuffer(write func([]byte)) LoggerOption { + return func(o *loggerOptions) { + o.writeKlogBuffer = write + } +} + // LoggerOption implements the functional parameter paradigm for // SetLoggerWithOptions. type LoggerOption func(o *loggerOptions) @@ -100,6 +119,13 @@ type LoggerOption func(o *loggerOptions) type loggerOptions struct { contextualLogger bool flush func() + writeKlogBuffer func([]byte) +} + +// logWriter combines a logger (always set) with a write callback (optional). 
+type logWriter struct { + Logger + writeKlogBuffer func([]byte) } // ClearLogger removes a backing Logger implementation if one was set earlier @@ -152,7 +178,7 @@ func Background() Logger { if logging.loggerOptions.contextualLogger { // Is non-nil because logging.loggerOptions.contextualLogger is // only true if a logger was set. - return *logging.logger + return logging.logger.Logger } return klogLogger diff --git a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go index ac88682a2c44c..f325ded5e9e62 100644 --- a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go +++ b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go @@ -40,44 +40,33 @@ type Buffer struct { next *Buffer } -// Buffers manages the reuse of individual buffer instances. It is thread-safe. -type Buffers struct { - // mu protects the free list. It is separate from the main mutex - // so buffers can be grabbed and printed to without holding the main lock, - // for better parallelization. - mu sync.Mutex - - // freeList is a list of byte buffers, maintained under mu. - freeList *Buffer +var buffers = sync.Pool{ + New: func() interface{} { + return new(Buffer) + }, } // GetBuffer returns a new, ready-to-use buffer. -func (bl *Buffers) GetBuffer() *Buffer { - bl.mu.Lock() - b := bl.freeList - if b != nil { - bl.freeList = b.next - } - bl.mu.Unlock() - if b == nil { - b = new(Buffer) - } else { - b.next = nil - b.Reset() - } +func GetBuffer() *Buffer { + b := buffers.Get().(*Buffer) + b.Reset() return b } // PutBuffer returns a buffer to the free list. -func (bl *Buffers) PutBuffer(b *Buffer) { +func PutBuffer(b *Buffer) { if b.Len() >= 256 { - // Let big buffers die a natural death. + // Let big buffers die a natural death, without relying on + // sync.Pool behavior. The documentation implies that items may + // get deallocated while stored there ("If the Pool holds the + // only reference when this [= be removed automatically] + // happens, the item might be deallocated."), but + // https://github.com/golang/go/issues/23199 leans more towards + // having such a size limit. return } - bl.mu.Lock() - b.next = bl.freeList - bl.freeList = b - bl.mu.Unlock() + + buffers.Put(b) } // Some custom tiny helper functions to print the log header efficiently. @@ -121,7 +110,8 @@ func (buf *Buffer) someDigits(i, d int) int { return copy(buf.Tmp[i:], buf.Tmp[j:]) } -// FormatHeader formats a log header using the provided file name and line number. +// FormatHeader formats a log header using the provided file name and line number +// and writes it into the buffer. func (buf *Buffer) FormatHeader(s severity.Severity, file string, line int, now time.Time) { if line < 0 { line = 0 // not a real line number, but acceptable to someDigits @@ -157,3 +147,30 @@ func (buf *Buffer) FormatHeader(s severity.Severity, file string, line int, now buf.Tmp[n+2] = ' ' buf.Write(buf.Tmp[:n+3]) } + +// SprintHeader formats a log header and returns a string. This is a simpler +// version of FormatHeader for use in ktesting. +func (buf *Buffer) SprintHeader(s severity.Severity, now time.Time) string { + if s > severity.FatalLog { + s = severity.InfoLog // for safety. + } + + // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. + // It's worth about 3X. Fprintf is hard. 
+ _, month, day := now.Date() + hour, minute, second := now.Clock() + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + buf.Tmp[0] = severity.Char[s] + buf.twoDigits(1, int(month)) + buf.twoDigits(3, day) + buf.Tmp[5] = ' ' + buf.twoDigits(6, hour) + buf.Tmp[8] = ':' + buf.twoDigits(9, minute) + buf.Tmp[11] = ':' + buf.twoDigits(12, second) + buf.Tmp[14] = '.' + buf.nDigits(6, 15, now.Nanosecond()/1000, '0') + buf.Tmp[21] = ']' + return string(buf.Tmp[:22]) +} diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go index ad6bf1116538a..1dc81a15fa62f 100644 --- a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go +++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go @@ -24,6 +24,10 @@ import ( "github.com/go-logr/logr" ) +type textWriter interface { + WriteText(*bytes.Buffer) +} + // WithValues implements LogSink.WithValues. The old key/value pairs are // assumed to be well-formed, the new ones are checked and padded if // necessary. It returns a new slice. @@ -91,11 +95,66 @@ func MergeKVs(first, second []interface{}) []interface{} { return merged } +type Formatter struct { + AnyToStringHook AnyToStringFunc +} + +type AnyToStringFunc func(v interface{}) string + +// MergeKVsInto is a variant of MergeKVs which directly formats the key/value +// pairs into a buffer. +func (f Formatter) MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) { + if len(first) == 0 && len(second) == 0 { + // Nothing to do at all. + return + } + + if len(first) == 0 && len(second)%2 == 0 { + // Nothing to be overridden, second slice is well-formed + // and can be used directly. + for i := 0; i < len(second); i += 2 { + f.KVFormat(b, second[i], second[i+1]) + } + return + } + + // Determine which keys are in the second slice so that we can skip + // them when iterating over the first one. The code intentionally + // favors performance over completeness: we assume that keys are string + // constants and thus compare equal when the string values are equal. A + // string constant being overridden by, for example, a fmt.Stringer is + // not handled. + overrides := map[interface{}]bool{} + for i := 0; i < len(second); i += 2 { + overrides[second[i]] = true + } + for i := 0; i < len(first); i += 2 { + key := first[i] + if overrides[key] { + continue + } + f.KVFormat(b, key, first[i+1]) + } + // Round down. + l := len(second) + l = l / 2 * 2 + for i := 1; i < l; i += 2 { + f.KVFormat(b, second[i-1], second[i]) + } + if len(second)%2 == 1 { + f.KVFormat(b, second[len(second)-1], missingValue) + } +} + +func MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) { + Formatter{}.MergeAndFormatKVs(b, first, second) +} + const missingValue = "(MISSING)" // KVListFormat serializes all key/value pairs into the provided buffer. // A space gets inserted before the first pair and between each pair. -func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { +func (f Formatter) KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { for i := 0; i < len(keysAndValues); i += 2 { var v interface{} k := keysAndValues[i] @@ -104,69 +163,93 @@ func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { } else { v = missingValue } - b.WriteByte(' ') - // Keys are assumed to be well-formed according to - // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments - // for the sake of performance. Keys with spaces, - // special characters, etc. 
will break parsing. - if sK, ok := k.(string); ok { - // Avoid one allocation when the key is a string, which - // normally it should be. - b.WriteString(sK) - } else { - b.WriteString(fmt.Sprintf("%s", k)) - } + f.KVFormat(b, k, v) + } +} + +func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { + Formatter{}.KVListFormat(b, keysAndValues...) +} - // The type checks are sorted so that more frequently used ones - // come first because that is then faster in the common - // cases. In Kubernetes, ObjectRef (a Stringer) is more common - // than plain strings - // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235). - switch v := v.(type) { - case fmt.Stringer: - writeStringValue(b, true, StringerToString(v)) +// KVFormat serializes one key/value pair into the provided buffer. +// A space gets inserted before the pair. +func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) { + b.WriteByte(' ') + // Keys are assumed to be well-formed according to + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments + // for the sake of performance. Keys with spaces, + // special characters, etc. will break parsing. + if sK, ok := k.(string); ok { + // Avoid one allocation when the key is a string, which + // normally it should be. + b.WriteString(sK) + } else { + b.WriteString(fmt.Sprintf("%s", k)) + } + + // The type checks are sorted so that more frequently used ones + // come first because that is then faster in the common + // cases. In Kubernetes, ObjectRef (a Stringer) is more common + // than plain strings + // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235). + switch v := v.(type) { + case textWriter: + writeTextWriterValue(b, v) + case fmt.Stringer: + writeStringValue(b, true, StringerToString(v)) + case string: + writeStringValue(b, true, v) + case error: + writeStringValue(b, true, ErrorToString(v)) + case logr.Marshaler: + value := MarshalerToValue(v) + // A marshaler that returns a string is useful for + // delayed formatting of complex values. We treat this + // case like a normal string. This is useful for + // multi-line support. + // + // We could do this by recursively formatting a value, + // but that comes with the risk of infinite recursion + // if a marshaler returns itself. Instead we call it + // only once and rely on it returning the intended + // value directly. + switch value := value.(type) { case string: - writeStringValue(b, true, v) - case error: - writeStringValue(b, true, ErrorToString(v)) - case logr.Marshaler: - value := MarshalerToValue(v) - // A marshaler that returns a string is useful for - // delayed formatting of complex values. We treat this - // case like a normal string. This is useful for - // multi-line support. - // - // We could do this by recursively formatting a value, - // but that comes with the risk of infinite recursion - // if a marshaler returns itself. Instead we call it - // only once and rely on it returning the intended - // value directly. - switch value := value.(type) { - case string: - writeStringValue(b, true, value) - default: - writeStringValue(b, false, fmt.Sprintf("%+v", value)) - } - case []byte: - // In https://github.com/kubernetes/klog/pull/237 it was decided - // to format byte slices with "%+q". 
The advantages of that are: - // - readable output if the bytes happen to be printable - // - non-printable bytes get represented as unicode escape - // sequences (\uxxxx) - // - // The downsides are that we cannot use the faster - // strconv.Quote here and that multi-line output is not - // supported. If developers know that a byte array is - // printable and they want multi-line output, they can - // convert the value to string before logging it. - b.WriteByte('=') - b.WriteString(fmt.Sprintf("%+q", v)) + writeStringValue(b, true, value) default: - writeStringValue(b, false, fmt.Sprintf("%+v", v)) + writeStringValue(b, false, f.AnyToString(value)) } + case []byte: + // In https://github.com/kubernetes/klog/pull/237 it was decided + // to format byte slices with "%+q". The advantages of that are: + // - readable output if the bytes happen to be printable + // - non-printable bytes get represented as unicode escape + // sequences (\uxxxx) + // + // The downsides are that we cannot use the faster + // strconv.Quote here and that multi-line output is not + // supported. If developers know that a byte array is + // printable and they want multi-line output, they can + // convert the value to string before logging it. + b.WriteByte('=') + b.WriteString(fmt.Sprintf("%+q", v)) + default: + writeStringValue(b, false, f.AnyToString(v)) } } +func KVFormat(b *bytes.Buffer, k, v interface{}) { + Formatter{}.KVFormat(b, k, v) +} + +// AnyToString is the historic fallback formatter. +func (f Formatter) AnyToString(v interface{}) string { + if f.AnyToStringHook != nil { + return f.AnyToStringHook(v) + } + return fmt.Sprintf("%+v", v) +} + // StringerToString converts a Stringer to a string, // handling panics if they occur. func StringerToString(s fmt.Stringer) (ret string) { @@ -203,6 +286,16 @@ func ErrorToString(err error) (ret string) { return } +func writeTextWriterValue(b *bytes.Buffer, v textWriter) { + b.WriteRune('=') + defer func() { + if err := recover(); err != nil { + fmt.Fprintf(b, `""`, err) + } + }() + v.WriteText(b) +} + func writeStringValue(b *bytes.Buffer, quote bool, v string) { data := []byte(v) index := bytes.IndexByte(data, '\n') diff --git a/vendor/k8s.io/klog/v2/k8s_references.go b/vendor/k8s.io/klog/v2/k8s_references.go index 2c218f698c7a4..ecd3f8b690339 100644 --- a/vendor/k8s.io/klog/v2/k8s_references.go +++ b/vendor/k8s.io/klog/v2/k8s_references.go @@ -17,8 +17,10 @@ limitations under the License. package klog import ( + "bytes" "fmt" "reflect" + "strings" "github.com/go-logr/logr" ) @@ -31,11 +33,30 @@ type ObjectRef struct { func (ref ObjectRef) String() string { if ref.Namespace != "" { - return fmt.Sprintf("%s/%s", ref.Namespace, ref.Name) + var builder strings.Builder + builder.Grow(len(ref.Namespace) + len(ref.Name) + 1) + builder.WriteString(ref.Namespace) + builder.WriteRune('/') + builder.WriteString(ref.Name) + return builder.String() } return ref.Name } +func (ref ObjectRef) WriteText(out *bytes.Buffer) { + out.WriteRune('"') + ref.writeUnquoted(out) + out.WriteRune('"') +} + +func (ref ObjectRef) writeUnquoted(out *bytes.Buffer) { + if ref.Namespace != "" { + out.WriteString(ref.Namespace) + out.WriteRune('/') + } + out.WriteString(ref.Name) +} + // MarshalLog ensures that loggers with support for structured output will log // as a struct by removing the String method via a custom type. 
 func (ref ObjectRef) MarshalLog() interface{} {
@@ -117,31 +138,31 @@ var _ fmt.Stringer = kobjSlice{}
 var _ logr.Marshaler = kobjSlice{}
 
 func (ks kobjSlice) String() string {
-	objectRefs, err := ks.process()
-	if err != nil {
-		return err.Error()
+	objectRefs, errStr := ks.process()
+	if errStr != "" {
+		return errStr
 	}
 	return fmt.Sprintf("%v", objectRefs)
 }
 
 func (ks kobjSlice) MarshalLog() interface{} {
-	objectRefs, err := ks.process()
-	if err != nil {
-		return err.Error()
+	objectRefs, errStr := ks.process()
+	if errStr != "" {
+		return errStr
 	}
 	return objectRefs
 }
 
-func (ks kobjSlice) process() ([]interface{}, error) {
+func (ks kobjSlice) process() (objs []interface{}, err string) {
 	s := reflect.ValueOf(ks.arg)
 	switch s.Kind() {
 	case reflect.Invalid:
 		// nil parameter, print as nil.
-		return nil, nil
+		return nil, ""
 	case reflect.Slice:
 		// Okay, handle below.
 	default:
-		return nil, fmt.Errorf("<KObjSlice needs a slice, got type %T>", ks.arg)
+		return nil, fmt.Sprintf("<KObjSlice needs a slice, got type %T>", ks.arg)
 	}
 	objectRefs := make([]interface{}, 0, s.Len())
 	for i := 0; i < s.Len(); i++ {
@@ -151,8 +172,41 @@ func (ks kobjSlice) process() ([]interface{}, error) {
 		} else if v, ok := item.(KMetadata); ok {
 			objectRefs = append(objectRefs, KObj(v))
 		} else {
-			return nil, fmt.Errorf("<KObjSlice needs a slice of values implementing KMetadata, got type %T>", item)
+			return nil, fmt.Sprintf("<KObjSlice needs a slice of values implementing KMetadata, got type %T>", item)
+		}
+	}
+	return objectRefs, ""
+}
+
+var nilToken = []byte("<nil>")
+
+func (ks kobjSlice) WriteText(out *bytes.Buffer) {
+	s := reflect.ValueOf(ks.arg)
+	switch s.Kind() {
+	case reflect.Invalid:
+		// nil parameter, print as empty slice.
+		out.WriteString("[]")
+		return
+	case reflect.Slice:
+		// Okay, handle below.
+	default:
+		fmt.Fprintf(out, `"<KObjSlice needs a slice, got type %T>"`, ks.arg)
+		return
+	}
+	out.Write([]byte{'['})
+	defer out.Write([]byte{']'})
+	for i := 0; i < s.Len(); i++ {
+		if i > 0 {
+			out.Write([]byte{' '})
+		}
+		item := s.Index(i).Interface()
+		if item == nil {
+			out.Write(nilToken)
+		} else if v, ok := item.(KMetadata); ok {
+			KObj(v).writeUnquoted(out)
+		} else {
+			fmt.Fprintf(out, "<KObjSlice needs a slice of values implementing KMetadata, got type %T>", item)
+			return
 		}
 	}
-	return objectRefs, nil
 }
diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go
index 1bd11b6754ffa..466eeaf265b54 100644
--- a/vendor/k8s.io/klog/v2/klog.go
+++ b/vendor/k8s.io/klog/v2/klog.go
@@ -91,8 +91,6 @@ import (
 	"sync/atomic"
 	"time"
 
-	"github.com/go-logr/logr"
-
 	"k8s.io/klog/v2/internal/buffer"
 	"k8s.io/klog/v2/internal/clock"
 	"k8s.io/klog/v2/internal/dbg"
@@ -453,7 +451,7 @@ type settings struct {
 
 	// logger is the global Logger chosen by users of klog, nil if
 	// none is available.
-	logger *Logger
+	logger *logWriter
 
 	// loggerOptions contains the options that were supplied for
 	// globalLogger.
@@ -525,6 +523,11 @@ func (s settings) deepCopy() settings {
 	}
 	s.vmodule.filter = filter
 
+	if s.logger != nil {
+		logger := *s.logger
+		s.logger = &logger
+	}
+
 	return s
 }
 
@@ -532,11 +535,6 @@ func (s settings) deepCopy() settings {
 type loggingT struct {
 	settings
 
-	// bufferCache maintains the free list. It uses its own mutex
-	// so buffers can be grabbed and printed to without holding the main lock,
-	// for better parallelization.
-	bufferCache buffer.Buffers
-
 	// flushD holds a flushDaemon that frequently flushes log file buffers.
 	// Uses its own mutex.
 	flushD *flushDaemon
@@ -664,7 +662,7 @@ func (l *loggingT) header(s severity.Severity, depth int) (*buffer.Buffer, strin
 // formatHeader formats a log header using the provided file name and line number.
func (l *loggingT) formatHeader(s severity.Severity, file string, line int) *buffer.Buffer { - buf := l.bufferCache.GetBuffer() + buf := buffer.GetBuffer() if l.skipHeaders { return buf } @@ -673,17 +671,18 @@ func (l *loggingT) formatHeader(s severity.Severity, file string, line int) *buf return buf } -func (l *loggingT) println(s severity.Severity, logger *logr.Logger, filter LogFilter, args ...interface{}) { +func (l *loggingT) println(s severity.Severity, logger *logWriter, filter LogFilter, args ...interface{}) { l.printlnDepth(s, logger, filter, 1, args...) } -func (l *loggingT) printlnDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, args ...interface{}) { +func (l *loggingT) printlnDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) { buf, file, line := l.header(s, depth) - // if logger is set, we clear the generated header as we rely on the backing - // logger implementation to print headers - if logger != nil { - l.bufferCache.PutBuffer(buf) - buf = l.bufferCache.GetBuffer() + // If a logger is set and doesn't support writing a formatted buffer, + // we clear the generated header as we rely on the backing + // logger implementation to print headers. + if logger != nil && logger.writeKlogBuffer == nil { + buffer.PutBuffer(buf) + buf = buffer.GetBuffer() } if filter != nil { args = filter.Filter(args) @@ -692,17 +691,18 @@ func (l *loggingT) printlnDepth(s severity.Severity, logger *logr.Logger, filter l.output(s, logger, buf, depth, file, line, false) } -func (l *loggingT) print(s severity.Severity, logger *logr.Logger, filter LogFilter, args ...interface{}) { +func (l *loggingT) print(s severity.Severity, logger *logWriter, filter LogFilter, args ...interface{}) { l.printDepth(s, logger, filter, 1, args...) } -func (l *loggingT) printDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, args ...interface{}) { +func (l *loggingT) printDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) { buf, file, line := l.header(s, depth) - // if logr is set, we clear the generated header as we rely on the backing - // logr implementation to print headers - if logger != nil { - l.bufferCache.PutBuffer(buf) - buf = l.bufferCache.GetBuffer() + // If a logger is set and doesn't support writing a formatted buffer, + // we clear the generated header as we rely on the backing + // logger implementation to print headers. + if logger != nil && logger.writeKlogBuffer == nil { + buffer.PutBuffer(buf) + buf = buffer.GetBuffer() } if filter != nil { args = filter.Filter(args) @@ -714,17 +714,18 @@ func (l *loggingT) printDepth(s severity.Severity, logger *logr.Logger, filter L l.output(s, logger, buf, depth, file, line, false) } -func (l *loggingT) printf(s severity.Severity, logger *logr.Logger, filter LogFilter, format string, args ...interface{}) { +func (l *loggingT) printf(s severity.Severity, logger *logWriter, filter LogFilter, format string, args ...interface{}) { l.printfDepth(s, logger, filter, 1, format, args...) 
} -func (l *loggingT) printfDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, format string, args ...interface{}) { +func (l *loggingT) printfDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, format string, args ...interface{}) { buf, file, line := l.header(s, depth) - // if logr is set, we clear the generated header as we rely on the backing - // logr implementation to print headers - if logger != nil { - l.bufferCache.PutBuffer(buf) - buf = l.bufferCache.GetBuffer() + // If a logger is set and doesn't support writing a formatted buffer, + // we clear the generated header as we rely on the backing + // logger implementation to print headers. + if logger != nil && logger.writeKlogBuffer == nil { + buffer.PutBuffer(buf) + buf = buffer.GetBuffer() } if filter != nil { format, args = filter.FilterF(format, args) @@ -739,13 +740,14 @@ func (l *loggingT) printfDepth(s severity.Severity, logger *logr.Logger, filter // printWithFileLine behaves like print but uses the provided file and line number. If // alsoLogToStderr is true, the log message always appears on standard error; it // will also appear in the log file unless --logtostderr is set. -func (l *loggingT) printWithFileLine(s severity.Severity, logger *logr.Logger, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { +func (l *loggingT) printWithFileLine(s severity.Severity, logger *logWriter, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) { buf := l.formatHeader(s, file, line) - // if logr is set, we clear the generated header as we rely on the backing - // logr implementation to print headers - if logger != nil { - l.bufferCache.PutBuffer(buf) - buf = l.bufferCache.GetBuffer() + // If a logger is set and doesn't support writing a formatted buffer, + // we clear the generated header as we rely on the backing + // logger implementation to print headers. + if logger != nil && logger.writeKlogBuffer == nil { + buffer.PutBuffer(buf) + buf = buffer.GetBuffer() } if filter != nil { args = filter.Filter(args) @@ -758,7 +760,7 @@ func (l *loggingT) printWithFileLine(s severity.Severity, logger *logr.Logger, f } // if loggr is specified, will call loggr.Error, otherwise output with logging module. -func (l *loggingT) errorS(err error, logger *logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { +func (l *loggingT) errorS(err error, logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) } @@ -770,7 +772,7 @@ func (l *loggingT) errorS(err error, logger *logr.Logger, filter LogFilter, dept } // if loggr is specified, will call loggr.Info, otherwise output with logging module. -func (l *loggingT) infoS(logger *logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { +func (l *loggingT) infoS(logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) { if filter != nil { msg, keysAndValues = filter.FilterS(msg, keysAndValues) } @@ -785,7 +787,7 @@ func (l *loggingT) infoS(logger *logr.Logger, filter LogFilter, depth int, msg s // set log severity by s func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, keysAndValues ...interface{}) { // Only create a new buffer if we don't have one cached. 
- b := l.bufferCache.GetBuffer() + b := buffer.GetBuffer() // The message is always quoted, even if it contains line breaks. // If developers want multi-line output, they should use a small, fixed // message and put the multi-line output into a value. @@ -796,7 +798,7 @@ func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, serialize.KVListFormat(&b.Buffer, keysAndValues...) l.printDepth(s, logging.logger, nil, depth+1, &b.Buffer) // Make the buffer available for reuse. - l.bufferCache.PutBuffer(b) + buffer.PutBuffer(b) } // redirectBuffer is used to set an alternate destination for the logs @@ -851,7 +853,7 @@ func LogToStderr(stderr bool) { } // output writes the data to the log files and releases the buffer. -func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buffer, depth int, file string, line int, alsoToStderr bool) { +func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Buffer, depth int, file string, line int, alsoToStderr bool) { var isLocked = true l.mu.Lock() defer func() { @@ -867,13 +869,17 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf } } data := buf.Bytes() - if log != nil { - // TODO: set 'severity' and caller information as structured log info - // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line} - if s == severity.ErrorLog { - logging.logger.WithCallDepth(depth+3).Error(nil, string(data)) + if logger != nil { + if logger.writeKlogBuffer != nil { + logger.writeKlogBuffer(data) } else { - log.WithCallDepth(depth + 3).Info(string(data)) + // TODO: set 'severity' and caller information as structured log info + // keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line} + if s == severity.ErrorLog { + logger.WithCallDepth(depth+3).Error(nil, string(data)) + } else { + logger.WithCallDepth(depth + 3).Info(string(data)) + } } } else if l.toStderr { os.Stderr.Write(data) @@ -948,7 +954,7 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf timeoutFlush(ExitFlushTimeout) OsExit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. } - l.bufferCache.PutBuffer(buf) + buffer.PutBuffer(buf) if stats := severityStats[s]; stats != nil { atomic.AddInt64(&stats.lines, 1) @@ -1282,7 +1288,7 @@ func (l *loggingT) setV(pc uintptr) Level { // See the documentation of V for more information. type Verbose struct { enabled bool - logr *logr.Logger + logger *logWriter } func newVerbose(level Level, b bool) Verbose { @@ -1290,7 +1296,7 @@ func newVerbose(level Level, b bool) Verbose { return Verbose{b, nil} } v := logging.logger.V(int(level)) - return Verbose{b, &v} + return Verbose{b, &logWriter{Logger: v, writeKlogBuffer: logging.loggerOptions.writeKlogBuffer}} } // V reports whether verbosity at the call site is at least the requested level. @@ -1313,6 +1319,13 @@ func newVerbose(level Level, b bool) Verbose { // less than or equal to the value of the -vmodule pattern matching the source file // containing the call. func V(level Level) Verbose { + return VDepth(1, level) +} + +// VDepth is a variant of V that accepts a number of stack frames that will be +// skipped when checking the -vmodule patterns. VDepth(0) is equivalent to +// V(). +func VDepth(depth int, level Level) Verbose { // This function tries hard to be cheap unless there's work to do. // The fast path is two atomic loads and compares. 
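// [Editor's illustrative sketch; not part of the vendored diff.] VDepth, added
// above, lets a logging helper that wraps klog skip its own stack frame, so
// -vmodule pattern matching refers to the helper's caller; the matching
// InfofDepth call keeps the reported file:line pointing there as well. The
// helper name and verbosity level are assumptions, and k8s.io/klog/v2 is
// assumed to be imported as klog.
func debugf(format string, args ...interface{}) {
	if v := klog.VDepth(1, 4); v.Enabled() { // skip debugf when evaluating -vmodule
		v.InfofDepth(1, format, args...) // and when attributing the call site
	}
}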
@@ -1329,7 +1342,7 @@ func V(level Level) Verbose { // but if V logging is enabled we're slow anyway. logging.mu.Lock() defer logging.mu.Unlock() - if runtime.Callers(2, logging.pcs[:]) == 0 { + if runtime.Callers(2+depth, logging.pcs[:]) == 0 { return newVerbose(level, false) } // runtime.Callers returns "return PCs", but we want @@ -1357,7 +1370,7 @@ func (v Verbose) Enabled() bool { // See the documentation of V for usage. func (v Verbose) Info(args ...interface{}) { if v.enabled { - logging.print(severity.InfoLog, v.logr, logging.filter, args...) + logging.print(severity.InfoLog, v.logger, logging.filter, args...) } } @@ -1365,7 +1378,7 @@ func (v Verbose) Info(args ...interface{}) { // See the documentation of V for usage. func (v Verbose) InfoDepth(depth int, args ...interface{}) { if v.enabled { - logging.printDepth(severity.InfoLog, v.logr, logging.filter, depth, args...) + logging.printDepth(severity.InfoLog, v.logger, logging.filter, depth, args...) } } @@ -1373,7 +1386,7 @@ func (v Verbose) InfoDepth(depth int, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) Infoln(args ...interface{}) { if v.enabled { - logging.println(severity.InfoLog, v.logr, logging.filter, args...) + logging.println(severity.InfoLog, v.logger, logging.filter, args...) } } @@ -1381,7 +1394,7 @@ func (v Verbose) Infoln(args ...interface{}) { // See the documentation of V for usage. func (v Verbose) InfolnDepth(depth int, args ...interface{}) { if v.enabled { - logging.printlnDepth(severity.InfoLog, v.logr, logging.filter, depth, args...) + logging.printlnDepth(severity.InfoLog, v.logger, logging.filter, depth, args...) } } @@ -1389,7 +1402,7 @@ func (v Verbose) InfolnDepth(depth int, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) Infof(format string, args ...interface{}) { if v.enabled { - logging.printf(severity.InfoLog, v.logr, logging.filter, format, args...) + logging.printf(severity.InfoLog, v.logger, logging.filter, format, args...) } } @@ -1397,7 +1410,7 @@ func (v Verbose) Infof(format string, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) InfofDepth(depth int, format string, args ...interface{}) { if v.enabled { - logging.printfDepth(severity.InfoLog, v.logr, logging.filter, depth, format, args...) + logging.printfDepth(severity.InfoLog, v.logger, logging.filter, depth, format, args...) } } @@ -1405,7 +1418,7 @@ func (v Verbose) InfofDepth(depth int, format string, args ...interface{}) { // See the documentation of V for usage. func (v Verbose) InfoS(msg string, keysAndValues ...interface{}) { if v.enabled { - logging.infoS(v.logr, logging.filter, 0, msg, keysAndValues...) + logging.infoS(v.logger, logging.filter, 0, msg, keysAndValues...) } } @@ -1419,14 +1432,14 @@ func InfoSDepth(depth int, msg string, keysAndValues ...interface{}) { // See the documentation of V for usage. func (v Verbose) InfoSDepth(depth int, msg string, keysAndValues ...interface{}) { if v.enabled { - logging.infoS(v.logr, logging.filter, depth, msg, keysAndValues...) + logging.infoS(v.logger, logging.filter, depth, msg, keysAndValues...) } } // Deprecated: Use ErrorS instead. func (v Verbose) Error(err error, msg string, args ...interface{}) { if v.enabled { - logging.errorS(err, v.logr, logging.filter, 0, msg, args...) + logging.errorS(err, v.logger, logging.filter, 0, msg, args...) } } @@ -1434,7 +1447,7 @@ func (v Verbose) Error(err error, msg string, args ...interface{}) { // See the documentation of V for usage. 
 func (v Verbose) ErrorS(err error, msg string, keysAndValues ...interface{}) {
 	if v.enabled {
-		logging.errorS(err, v.logr, logging.filter, 0, msg, keysAndValues...)
+		logging.errorS(err, v.logger, logging.filter, 0, msg, keysAndValues...)
 	}
 }
 
diff --git a/vendor/k8s.io/klog/v2/klogr.go b/vendor/k8s.io/klog/v2/klogr.go
index 027a4014af5bc..15de00e21fb33 100644
--- a/vendor/k8s.io/klog/v2/klogr.go
+++ b/vendor/k8s.io/klog/v2/klogr.go
@@ -42,19 +42,21 @@ func (l *klogger) Init(info logr.RuntimeInfo) {
 	l.callDepth += info.CallDepth
 }
 
-func (l klogger) Info(level int, msg string, kvList ...interface{}) {
+func (l *klogger) Info(level int, msg string, kvList ...interface{}) {
 	merged := serialize.MergeKVs(l.values, kvList)
 	if l.prefix != "" {
 		msg = l.prefix + ": " + msg
 	}
-	V(Level(level)).InfoSDepth(l.callDepth+1, msg, merged...)
+	// Skip this function.
+	VDepth(l.callDepth+1, Level(level)).InfoSDepth(l.callDepth+1, msg, merged...)
 }
 
-func (l klogger) Enabled(level int) bool {
-	return V(Level(level)).Enabled()
+func (l *klogger) Enabled(level int) bool {
+	// Skip this function and logr.Logger.Info where Enabled is called.
+	return VDepth(l.callDepth+2, Level(level)).Enabled()
 }
 
-func (l klogger) Error(err error, msg string, kvList ...interface{}) {
+func (l *klogger) Error(err error, msg string, kvList ...interface{}) {
 	merged := serialize.MergeKVs(l.values, kvList)
 	if l.prefix != "" {
 		msg = l.prefix + ": " + msg
diff --git a/vendor/modules.txt b/vendor/modules.txt
index f7ee0ae13e520..861d0dddc2d61 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,21 +1,32 @@
-# cloud.google.com/go v0.102.1
-## explicit; go 1.15
+# cloud.google.com/go v0.110.0
+## explicit; go 1.19
 cloud.google.com/go
-cloud.google.com/go/internal/version
-# cloud.google.com/go/compute v1.7.0
-## explicit; go 1.15
+# cloud.google.com/go/compute v1.18.0
+## explicit; go 1.19
+cloud.google.com/go/compute/internal
+# cloud.google.com/go/compute/metadata v0.2.3
+## explicit; go 1.19
 cloud.google.com/go/compute/metadata
-# cloud.google.com/go/logging v1.4.2
-## explicit; go 1.11
+# cloud.google.com/go/logging v1.7.0
+## explicit; go 1.19
 cloud.google.com/go/logging
 cloud.google.com/go/logging/apiv2
+cloud.google.com/go/logging/apiv2/loggingpb
 cloud.google.com/go/logging/internal
+# cloud.google.com/go/longrunning v0.4.1
+## explicit; go 1.19
+cloud.google.com/go/longrunning
+cloud.google.com/go/longrunning/autogen
+cloud.google.com/go/longrunning/autogen/longrunningpb
 # code.cloudfoundry.org/clock v1.0.0
 ## explicit
 code.cloudfoundry.org/clock
-# github.com/AdaLogics/go-fuzz-headers v0.0.0-20221118232415-3345c89a7c72
+# github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1
 ## explicit; go 1.18
 github.com/AdaLogics/go-fuzz-headers
+# github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20221215162035-5330a85ea652
+## explicit; go 1.18
+github.com/AdamKorcz/go-118-fuzz-build/testing
 # github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1
 ## explicit; go 1.16
 github.com/Azure/go-ansiterm
@@ -23,18 +34,19 @@ github.com/Azure/go-ansiterm/winterm
 # github.com/Graylog2/go-gelf v0.0.0-20191017102106-1550ee647df0
 ## explicit
 github.com/Graylog2/go-gelf/gelf
-# github.com/Microsoft/go-winio v0.5.2
-## explicit; go 1.13
+# github.com/Microsoft/go-winio v0.6.0
+## explicit; go 1.17
 github.com/Microsoft/go-winio
 github.com/Microsoft/go-winio/backuptar
+github.com/Microsoft/go-winio/internal/socket
 github.com/Microsoft/go-winio/pkg/etw
 github.com/Microsoft/go-winio/pkg/etwlogrus
 github.com/Microsoft/go-winio/pkg/fs
 github.com/Microsoft/go-winio/pkg/guid
-github.com/Microsoft/go-winio/pkg/security
+github.com/Microsoft/go-winio/tools/mkwinsyscall
 github.com/Microsoft/go-winio/vhd
-# github.com/Microsoft/hcsshim v0.9.7
-## explicit; go 1.13
+# github.com/Microsoft/hcsshim v0.10.0-rc.7
+## explicit; go 1.18
 github.com/Microsoft/hcsshim
 github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options
 github.com/Microsoft/hcsshim/computestorage
@@ -51,12 +63,15 @@ github.com/Microsoft/hcsshim/internal/jobobject
 github.com/Microsoft/hcsshim/internal/log
 github.com/Microsoft/hcsshim/internal/logfields
 github.com/Microsoft/hcsshim/internal/longpath
+github.com/Microsoft/hcsshim/internal/memory
 github.com/Microsoft/hcsshim/internal/mergemaps
 github.com/Microsoft/hcsshim/internal/oc
+github.com/Microsoft/hcsshim/internal/protocol/guestrequest
 github.com/Microsoft/hcsshim/internal/queue
 github.com/Microsoft/hcsshim/internal/regstate
 github.com/Microsoft/hcsshim/internal/runhcs
 github.com/Microsoft/hcsshim/internal/safefile
+github.com/Microsoft/hcsshim/internal/security
 github.com/Microsoft/hcsshim/internal/timeout
 github.com/Microsoft/hcsshim/internal/vmcompute
 github.com/Microsoft/hcsshim/internal/wclayer
@@ -168,10 +183,10 @@ github.com/beorn7/perks/quantile
 # github.com/bsphere/le_go v0.0.0-20200109081728-fc06dab2caa8
 ## explicit; go 1.12
 github.com/bsphere/le_go
-# github.com/cenkalti/backoff/v4 v4.1.2
-## explicit; go 1.13
+# github.com/cenkalti/backoff/v4 v4.2.0
+## explicit; go 1.18
 github.com/cenkalti/backoff/v4
-# github.com/cespare/xxhash/v2 v2.1.2
+# github.com/cespare/xxhash/v2 v2.2.0
 ## explicit; go 1.11
 github.com/cespare/xxhash/v2
 # github.com/cilium/ebpf v0.9.1
@@ -203,7 +218,7 @@ github.com/cloudflare/cfssl/signer/local
 # github.com/container-storage-interface/spec v1.5.0
 ## explicit; go 1.16
 github.com/container-storage-interface/spec/lib/go/csi
-# github.com/containerd/cgroups v1.0.4
+# github.com/containerd/cgroups v1.1.0
 ## explicit; go 1.17
 github.com/containerd/cgroups/stats/v1
 # github.com/containerd/cgroups/v3 v3.0.1
@@ -216,10 +231,12 @@ github.com/containerd/cgroups/v3/cgroup2/stats
 # github.com/containerd/console v1.0.3
 ## explicit; go 1.13
 github.com/containerd/console
-# github.com/containerd/containerd v1.6.19
-## explicit; go 1.17
+# github.com/containerd/containerd v1.7.0-rc.2
+## explicit; go 1.19
 github.com/containerd/containerd
 github.com/containerd/containerd/api/events
+github.com/containerd/containerd/api/runtime/sandbox/v1
+github.com/containerd/containerd/api/runtime/task/v2
 github.com/containerd/containerd/api/services/containers/v1
 github.com/containerd/containerd/api/services/content/v1
 github.com/containerd/containerd/api/services/diff/v1
@@ -228,12 +245,16 @@ github.com/containerd/containerd/api/services/images/v1
 github.com/containerd/containerd/api/services/introspection/v1
 github.com/containerd/containerd/api/services/leases/v1
 github.com/containerd/containerd/api/services/namespaces/v1
+github.com/containerd/containerd/api/services/sandbox/v1
 github.com/containerd/containerd/api/services/snapshots/v1
+github.com/containerd/containerd/api/services/streaming/v1
 github.com/containerd/containerd/api/services/tasks/v1
+github.com/containerd/containerd/api/services/transfer/v1
 github.com/containerd/containerd/api/services/ttrpc/events/v1
 github.com/containerd/containerd/api/services/version/v1
 github.com/containerd/containerd/api/types
 github.com/containerd/containerd/api/types/task
+github.com/containerd/containerd/api/types/transfer
 github.com/containerd/containerd/archive
 github.com/containerd/containerd/archive/compression
 github.com/containerd/containerd/cio
@@ -266,16 +287,28 @@ github.com/containerd/containerd/namespaces
 github.com/containerd/containerd/oci
 github.com/containerd/containerd/pkg/apparmor
 github.com/containerd/containerd/pkg/cap
+github.com/containerd/containerd/pkg/cleanup
 github.com/containerd/containerd/pkg/dialer
+github.com/containerd/containerd/pkg/epoch
 github.com/containerd/containerd/pkg/kmutex
+github.com/containerd/containerd/pkg/randutil
 github.com/containerd/containerd/pkg/runtimeoptions/v1
 github.com/containerd/containerd/pkg/seccomp
 github.com/containerd/containerd/pkg/shutdown
 github.com/containerd/containerd/pkg/snapshotters
+github.com/containerd/containerd/pkg/streaming
+github.com/containerd/containerd/pkg/transfer
+github.com/containerd/containerd/pkg/transfer/proxy
+github.com/containerd/containerd/pkg/transfer/streaming
 github.com/containerd/containerd/pkg/ttrpcutil
+github.com/containerd/containerd/pkg/unpack
 github.com/containerd/containerd/pkg/userns
 github.com/containerd/containerd/platforms
 github.com/containerd/containerd/plugin
+github.com/containerd/containerd/protobuf
+github.com/containerd/containerd/protobuf/plugin
+github.com/containerd/containerd/protobuf/proto
+github.com/containerd/containerd/protobuf/types
 github.com/containerd/containerd/reference
 github.com/containerd/containerd/reference/docker
 github.com/containerd/containerd/remotes
@@ -287,7 +320,8 @@ github.com/containerd/containerd/rootfs
 github.com/containerd/containerd/runtime/linux/runctypes
 github.com/containerd/containerd/runtime/v2/runc/options
 github.com/containerd/containerd/runtime/v2/shim
-github.com/containerd/containerd/runtime/v2/task
+github.com/containerd/containerd/sandbox
+github.com/containerd/containerd/sandbox/proxy
 github.com/containerd/containerd/services
 github.com/containerd/containerd/services/content/contentserver
 github.com/containerd/containerd/services/introspection
@@ -297,6 +331,7 @@ github.com/containerd/containerd/snapshots/overlay/overlayutils
 github.com/containerd/containerd/snapshots/proxy
 github.com/containerd/containerd/sys
 github.com/containerd/containerd/sys/reaper
+github.com/containerd/containerd/tracing
 github.com/containerd/containerd/version
 # github.com/containerd/continuity v0.3.0
 ## explicit; go 1.17
@@ -307,8 +342,8 @@ github.com/containerd/continuity/sysx
 # github.com/containerd/fifo v1.1.0
 ## explicit; go 1.18
 github.com/containerd/fifo
-# github.com/containerd/go-cni v1.1.6
-## explicit; go 1.17
+# github.com/containerd/go-cni v1.1.9
+## explicit; go 1.19
 github.com/containerd/go-cni
 # github.com/containerd/go-runc v1.0.0
 ## explicit; go 1.13
@@ -322,7 +357,7 @@ github.com/containerd/nydus-snapshotter/pkg/errdefs
 ## explicit; go 1.16
 github.com/containerd/stargz-snapshotter/estargz
 github.com/containerd/stargz-snapshotter/estargz/errorutil
-# github.com/containerd/ttrpc v1.1.0
+# github.com/containerd/ttrpc v1.2.0
 ## explicit; go 1.13
 github.com/containerd/ttrpc
 # github.com/containerd/typeurl v1.0.2
@@ -331,7 +366,7 @@ github.com/containerd/typeurl
 # github.com/containerd/typeurl/v2 v2.1.0
 ## explicit; go 1.13
 github.com/containerd/typeurl/v2
-# github.com/containernetworking/cni v1.1.1
+# github.com/containernetworking/cni v1.1.2
 ## explicit; go 1.14
 github.com/containernetworking/cni/libcni
 github.com/containernetworking/cni/pkg/invoke
@@ -343,7 +378,7 @@ github.com/containernetworking/cni/pkg/types/create
 github.com/containernetworking/cni/pkg/types/internal
 github.com/containernetworking/cni/pkg/utils
 github.com/containernetworking/cni/pkg/version
-# github.com/coreos/go-systemd/v22 v22.4.0
+# github.com/coreos/go-systemd/v22 v22.5.0
 ## explicit; go 1.12
 github.com/coreos/go-systemd/v22/activation
 github.com/coreos/go-systemd/v22/daemon
@@ -421,7 +456,7 @@ github.com/go-logr/logr/funcr
 # github.com/go-logr/stdr v1.2.2
 ## explicit; go 1.16
 github.com/go-logr/stdr
-# github.com/godbus/dbus/v5 v5.0.6
+# github.com/godbus/dbus/v5 v5.1.0
 ## explicit; go 1.12
 github.com/godbus/dbus/v5
 # github.com/gofrs/flock v0.8.1
@@ -450,7 +485,6 @@ github.com/golang/gddo/httputil/header
 github.com/golang/groupcache/lru
 # github.com/golang/protobuf v1.5.2
 ## explicit; go 1.9
-github.com/golang/protobuf/descriptor
 github.com/golang/protobuf/jsonpb
 github.com/golang/protobuf/proto
 github.com/golang/protobuf/protoc-gen-go/descriptor
@@ -487,12 +521,12 @@ github.com/google/shlex
 # github.com/google/uuid v1.3.0
 ## explicit
 github.com/google/uuid
-# github.com/googleapis/enterprise-certificate-proxy v0.1.0
-## explicit; go 1.18
+# github.com/googleapis/enterprise-certificate-proxy v0.2.3
+## explicit; go 1.19
 github.com/googleapis/enterprise-certificate-proxy/client
 github.com/googleapis/enterprise-certificate-proxy/client/util
-# github.com/googleapis/gax-go/v2 v2.4.0
-## explicit; go 1.15
+# github.com/googleapis/gax-go/v2 v2.7.0
+## explicit; go 1.19
 github.com/googleapis/gax-go/v2
 github.com/googleapis/gax-go/v2/apierror
 github.com/googleapis/gax-go/v2/apierror/internal/proto
@@ -506,11 +540,11 @@ github.com/grpc-ecosystem/go-grpc-middleware
 # github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
 ## explicit
 github.com/grpc-ecosystem/go-grpc-prometheus
-# github.com/grpc-ecosystem/grpc-gateway v1.16.0
+# github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0
 ## explicit; go 1.14
-github.com/grpc-ecosystem/grpc-gateway/internal
-github.com/grpc-ecosystem/grpc-gateway/runtime
-github.com/grpc-ecosystem/grpc-gateway/utilities
+github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule
+github.com/grpc-ecosystem/grpc-gateway/v2/runtime
+github.com/grpc-ecosystem/grpc-gateway/v2/utilities
 # github.com/hashicorp/errwrap v1.1.0
 ## explicit
 github.com/hashicorp/errwrap
@@ -539,7 +573,7 @@ github.com/hashicorp/memberlist
 ## explicit
 github.com/hashicorp/serf/coordinate
 github.com/hashicorp/serf/serf
-# github.com/imdario/mergo v0.3.12
+# github.com/imdario/mergo v0.3.13
 ## explicit; go 1.13
 github.com/imdario/mergo
 # github.com/in-toto/in-toto-golang v0.5.0
@@ -554,8 +588,8 @@ github.com/inconshreveable/mousetrap
 # github.com/ishidawataru/sctp v0.0.0-20210707070123-9a39160e9062
 ## explicit; go 1.12
 github.com/ishidawataru/sctp
-# github.com/klauspost/compress v1.15.12
-## explicit; go 1.17
+# github.com/klauspost/compress v1.16.0
+## explicit; go 1.18
 github.com/klauspost/compress
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/huff0
@@ -830,26 +864,25 @@ github.com/morikuni/aec
 ## explicit; go 1.13
 github.com/opencontainers/go-digest
 github.com/opencontainers/go-digest/digestset
-# github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1
-## explicit; go 1.16
+# github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b
+## explicit; go 1.17
 github.com/opencontainers/image-spec/identity
 github.com/opencontainers/image-spec/specs-go
 github.com/opencontainers/image-spec/specs-go/v1
-# github.com/opencontainers/runc v1.1.3
+# github.com/opencontainers/runc v1.1.4
 ## explicit; go 1.16
 github.com/opencontainers/runc/libcontainer/cgroups
 github.com/opencontainers/runc/libcontainer/configs
 github.com/opencontainers/runc/libcontainer/devices
 github.com/opencontainers/runc/libcontainer/user
 github.com/opencontainers/runc/libcontainer/userns
-# github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
+# github.com/opencontainers/runtime-spec v1.1.0-rc.1
 ## explicit
 github.com/opencontainers/runtime-spec/specs-go
-# github.com/opencontainers/selinux v1.10.2
-## explicit; go 1.13
+# github.com/opencontainers/selinux v1.11.0
+## explicit; go 1.19
 github.com/opencontainers/selinux/go-selinux
 github.com/opencontainers/selinux/go-selinux/label
-github.com/opencontainers/selinux/pkg/pwalk
 github.com/opencontainers/selinux/pkg/pwalkdir
 # github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170
 ## explicit; go 1.17
@@ -965,7 +998,7 @@ go.etcd.io/etcd/server/v3/etcdserver/api/snap
 go.etcd.io/etcd/server/v3/etcdserver/api/snap/snappb
 go.etcd.io/etcd/server/v3/wal
 go.etcd.io/etcd/server/v3/wal/walpb
-# go.opencensus.io v0.23.0
+# go.opencensus.io v0.24.0
 ## explicit; go 1.13
 go.opencensus.io
 go.opencensus.io/internal
@@ -973,6 +1006,8 @@ go.opencensus.io/internal/tagencoding
 go.opencensus.io/metric/metricdata
 go.opencensus.io/metric/metricproducer
 go.opencensus.io/plugin/ocgrpc
+go.opencensus.io/plugin/ochttp
+go.opencensus.io/plugin/ochttp/propagation/b3
 go.opencensus.io/resource
 go.opencensus.io/stats
 go.opencensus.io/stats/internal
@@ -982,8 +1017,8 @@ go.opencensus.io/trace
 go.opencensus.io/trace/internal
 go.opencensus.io/trace/propagation
 go.opencensus.io/trace/tracestate
-# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0
-## explicit; go 1.16
+# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0
+## explicit; go 1.18
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal
 # go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.29.0
@@ -992,54 +1027,62 @@ go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace
 # go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0
 ## explicit; go 1.16
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
-# go.opentelemetry.io/otel v1.4.1
-## explicit; go 1.16
+# go.opentelemetry.io/otel v1.12.0
+## explicit; go 1.18
 go.opentelemetry.io/otel
 go.opentelemetry.io/otel/attribute
 go.opentelemetry.io/otel/baggage
 go.opentelemetry.io/otel/codes
+go.opentelemetry.io/otel/exporters/otlp/internal
+go.opentelemetry.io/otel/exporters/otlp/internal/envconfig
 go.opentelemetry.io/otel/internal
+go.opentelemetry.io/otel/internal/attribute
 go.opentelemetry.io/otel/internal/baggage
 go.opentelemetry.io/otel/internal/global
 go.opentelemetry.io/otel/propagation
+go.opentelemetry.io/otel/semconv/internal
+go.opentelemetry.io/otel/semconv/internal/v2
+go.opentelemetry.io/otel/semconv/v1.12.0
+go.opentelemetry.io/otel/semconv/v1.17.0
+go.opentelemetry.io/otel/semconv/v1.17.0/httpconv
 go.opentelemetry.io/otel/semconv/v1.7.0
-# go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1
-## explicit; go 1.16
+# go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.12.0
+## explicit; go 1.18
 go.opentelemetry.io/otel/exporters/otlp/internal/retry
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1
-## explicit; go 1.16
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.12.0
+## explicit; go 1.18
 go.opentelemetry.io/otel/exporters/otlp/otlptrace
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.4.1
-## explicit; go 1.16
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.12.0
+## explicit; go 1.18
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.4.1
-## explicit; go 1.16
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.12.0
+## explicit; go 1.18
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
-# go.opentelemetry.io/otel/internal/metric v0.27.0
-## explicit; go 1.16
-go.opentelemetry.io/otel/internal/metric/global
-go.opentelemetry.io/otel/internal/metric/registry
-# go.opentelemetry.io/otel/metric v0.27.0
-## explicit; go 1.16
+# go.opentelemetry.io/otel/metric v0.34.0
+## explicit; go 1.18
 go.opentelemetry.io/otel/metric
 go.opentelemetry.io/otel/metric/global
-go.opentelemetry.io/otel/metric/number
-go.opentelemetry.io/otel/metric/sdkapi
+go.opentelemetry.io/otel/metric/instrument
+go.opentelemetry.io/otel/metric/instrument/asyncfloat64
+go.opentelemetry.io/otel/metric/instrument/asyncint64
+go.opentelemetry.io/otel/metric/instrument/syncfloat64
+go.opentelemetry.io/otel/metric/instrument/syncint64
+go.opentelemetry.io/otel/metric/internal/global
 go.opentelemetry.io/otel/metric/unit
-# go.opentelemetry.io/otel/sdk v1.4.1
-## explicit; go 1.16
+# go.opentelemetry.io/otel/sdk v1.12.0
+## explicit; go 1.18
 go.opentelemetry.io/otel/sdk/instrumentation
 go.opentelemetry.io/otel/sdk/internal
 go.opentelemetry.io/otel/sdk/internal/env
 go.opentelemetry.io/otel/sdk/resource
 go.opentelemetry.io/otel/sdk/trace
 go.opentelemetry.io/otel/sdk/trace/tracetest
-# go.opentelemetry.io/otel/trace v1.4.1
-## explicit; go 1.16
+# go.opentelemetry.io/otel/trace v1.12.0
+## explicit; go 1.18
 go.opentelemetry.io/otel/trace
-# go.opentelemetry.io/proto/otlp v0.12.0
+# go.opentelemetry.io/proto/otlp v0.19.0
 ## explicit; go 1.14
 go.opentelemetry.io/proto/otlp/collector/trace/v1
 go.opentelemetry.io/proto/otlp/common/v1
@@ -1079,6 +1122,9 @@ golang.org/x/crypto/pkcs12/internal/rc2
 golang.org/x/crypto/salsa20/salsa
 golang.org/x/crypto/ssh
 golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
+# golang.org/x/mod v0.7.0
+## explicit; go 1.17
+golang.org/x/mod/semver
 # golang.org/x/net v0.7.0
 ## explicit; go 1.17
 golang.org/x/net/bpf
@@ -1097,7 +1143,7 @@ golang.org/x/net/ipv6
 golang.org/x/net/proxy
 golang.org/x/net/trace
 golang.org/x/net/websocket
-# golang.org/x/oauth2 v0.1.0
+# golang.org/x/oauth2 v0.5.0
 ## explicit; go 1.17
 golang.org/x/oauth2
 golang.org/x/oauth2/authhandler
@@ -1111,7 +1157,7 @@ golang.org/x/oauth2/jwt
 golang.org/x/sync/errgroup
 golang.org/x/sync/semaphore
 golang.org/x/sync/syncmap
-# golang.org/x/sys v0.5.0
+# golang.org/x/sys v0.6.0
 ## explicit; go 1.17
 golang.org/x/sys/cpu
 golang.org/x/sys/execabs
@@ -1138,9 +1184,26 @@ golang.org/x/text/unicode/norm
 # golang.org/x/time v0.3.0
 ## explicit
 golang.org/x/time/rate
-# google.golang.org/api v0.93.0
-## explicit; go 1.15
+# golang.org/x/tools v0.5.0
+## explicit; go 1.18
+golang.org/x/tools/cmd/stringer
+golang.org/x/tools/go/gcexportdata
+golang.org/x/tools/go/internal/packagesdriver
+golang.org/x/tools/go/packages
+golang.org/x/tools/internal/event
+golang.org/x/tools/internal/event/core
+golang.org/x/tools/internal/event/keys
+golang.org/x/tools/internal/event/label
+golang.org/x/tools/internal/gcimporter
+golang.org/x/tools/internal/gocommand
+golang.org/x/tools/internal/packagesinternal
+golang.org/x/tools/internal/pkgbits
+golang.org/x/tools/internal/typeparams
+golang.org/x/tools/internal/typesinternal
+# google.golang.org/api v0.110.0
+## explicit; go 1.19
 google.golang.org/api/googleapi
+google.golang.org/api/googleapi/transport
 google.golang.org/api/internal
 google.golang.org/api/internal/impersonate
 google.golang.org/api/internal/third_party/uritemplates
@@ -1150,6 +1213,8 @@ google.golang.org/api/option/internaloption
 google.golang.org/api/support/bundler
 google.golang.org/api/transport/cert
 google.golang.org/api/transport/grpc
+google.golang.org/api/transport/http
+google.golang.org/api/transport/http/internal/propagation
 google.golang.org/api/transport/internal/dca
 # google.golang.org/appengine v1.6.7
 ## explicit; go 1.11
@@ -1165,8 +1230,8 @@ google.golang.org/appengine/internal/socket
 google.golang.org/appengine/internal/urlfetch
 google.golang.org/appengine/socket
 google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20220706185917-7780775163c4
-## explicit; go 1.15
+# google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4
+## explicit; go 1.19
 google.golang.org/genproto/googleapis/api
 google.golang.org/genproto/googleapis/api/annotations
 google.golang.org/genproto/googleapis/api/distribution
@@ -1175,13 +1240,12 @@ google.golang.org/genproto/googleapis/api/label
 google.golang.org/genproto/googleapis/api/metric
 google.golang.org/genproto/googleapis/api/monitoredres
 google.golang.org/genproto/googleapis/logging/type
-google.golang.org/genproto/googleapis/logging/v2
 google.golang.org/genproto/googleapis/longrunning
 google.golang.org/genproto/googleapis/rpc/code
 google.golang.org/genproto/googleapis/rpc/errdetails
 google.golang.org/genproto/googleapis/rpc/status
 google.golang.org/genproto/protobuf/field_mask
-# google.golang.org/grpc v1.50.1
+# google.golang.org/grpc v1.53.0
 ## explicit; go 1.17
 google.golang.org/grpc
 google.golang.org/grpc/attributes
@@ -1300,7 +1364,7 @@ gotest.tools/v3/internal/format
 gotest.tools/v3/internal/source
 gotest.tools/v3/poll
 gotest.tools/v3/skip
-# k8s.io/klog/v2 v2.80.1
+# k8s.io/klog/v2 v2.90.1
 ## explicit; go 1.13
 k8s.io/klog/v2
 k8s.io/klog/v2/internal/buffer